// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <linux/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
#define FILEMARK_DETECTED_ASCQ 0x1
#define EOP_EOM_DETECTED_ASCQ 0x2
#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
#define EOD_DETECTED_ASCQ 0x5
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_READY_ASC 0x28
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TOO_MANY_IN_PARTITION_ASC 0x3b
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
/* see drivers/scsi/sense_codes.h */

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
#define DEF_ATOMIC_WR_MAX_LENGTH 128
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
#define DEF_ATOMIC_WR_MAX_BNDRY 128
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

/* Default parameters for tape drives */
#define TAPE_DEF_DENSITY  0x0
#define TAPE_BAD_DENSITY  0x65
#define TAPE_DEF_BLKSIZE  0
#define TAPE_MIN_BLKSIZE  512
#define TAPE_MAX_BLKSIZE  1048576
#define TAPE_EW 20
#define TAPE_MAX_PARTITIONS 2
#define TAPE_UNITS 10000
#define TAPE_PARTITION_1_UNITS 1000

/* The tape block data definitions */
#define TAPE_BLOCK_FM_FLAG   ((u32)0x1 << 30)
#define TAPE_BLOCK_EOD_FLAG  ((u32)0x2 << 30)
#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_MARK(a) ((a) & TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_SIZE(a) ((a) & TAPE_BLOCK_SIZE_MASK)
#define IS_TAPE_BLOCK_FM(a)   (((a) & TAPE_BLOCK_FM_FLAG) != 0)
#define IS_TAPE_BLOCK_EOD(a)  (((a) & TAPE_BLOCK_EOD_FLAG) != 0)
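
/*
 * Worked example of the encoding (pure macro arithmetic): fl_size = 512
 * describes a plain 512-byte block (TAPE_BLOCK_SIZE() yields 512, no
 * mark flags set), while fl_size = TAPE_BLOCK_FM_FLAG encodes a
 * filemark and fl_size = TAPE_BLOCK_EOD_FLAG an end-of-data marker,
 * since the two top bits are reserved for the mark flags.
 */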

struct tape_block {
	u32 fl_size;
	unsigned char data[4];
};

/* Flags for sense data */
#define SENSE_FLAG_FILEMARK  0x80
#define SENSE_FLAG_EOM 0x40
#define SENSE_FLAG_ILI 0x20

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here, lower numbers have higher
 * priority. The UA numbers should form a sequence starting from 0, with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_UA_NOT_READY_TO_READY 8
#define SDEBUG_NUM_UAS 9
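
/*
 * A minimal sketch of how such a priority scheme can be honoured,
 * assuming the pending-UA bitmap (uas_bm below) is scanned from the
 * lowest set bit upward so that bit position doubles as priority:
 *
 *	int ua = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *	if (ua < SDEBUG_NUM_UAS)
 *		... report that UA, then clear_bit(ua, devip->uas_bm) ...
 */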

/* When the SDEBUG_OPT_MEDIUM_ERR option is set, a medium error is
 * simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. It can be reduced by the
 * max_queue option. Command responses are not queued when jdelay=0 and
 * ndelay=0. The per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a "word" here is the bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
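
/*
 * For example, on a 64-bit build (BITS_PER_LONG == 64) SDEBUG_CANQUEUE
 * evaluates to 3 * 64 = 192 queued commands per submit queue; the
 * _WORDS form keeps the value an exact multiple of a long-sized bitmap
 * word.
 */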

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

/* Device selection bit mask */
#define DS_ALL     0xffffffff
#define DS_SBC     (1 << TYPE_DISK)
#define DS_SSC     (1 << TYPE_TAPE)
#define DS_ZBC     (1 << TYPE_ZBC)

#define DS_NO_SSC  (DS_ALL & ~DS_SSC)
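
/*
 * Example: an opcode table entry whose devsel is DS_NO_SSC matches disk
 * (TYPE_DISK) and zoned (TYPE_ZBC) devices but not tape (TYPE_TAPE),
 * because the match is a plain mask test against the bit computed by
 * sdebug_get_devsel() below, i.e. devsel & (1 << sdp->type).
 */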

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* make scsi_debug_abort() */
					/* return FAILED */
	ERR_LUN_RESET_FAILED	= 4,	/* make scsi_debug_device_reset() */
					/* return FAILED */
};

struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	/* For tapes */
	unsigned int tape_blksize;
	unsigned int tape_density;
	unsigned char tape_partition;
	unsigned char tape_nbr_partitions;
	unsigned char tape_pending_nbr_partitions;
	unsigned int tape_pending_part_0_size;
	unsigned int tape_pending_part_1_size;
	unsigned char tape_dce;
	unsigned int tape_location[TAPE_MAX_PARTITIONS];
	unsigned int tape_eop[TAPE_MAX_PARTITIONS];
	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};

struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_scsi_cmd {
	spinlock_t   lock;
	struct sdebug_defer sd_dp;
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 devsel;		/* device type mask for this definition */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
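
/*
 * Illustration using the INQUIRY entry further below, {6, 0xe3, ...}:
 * len_mask[0] == 6 says the cdb is 6 bytes long, and len_mask[1] == 0xe3
 * means only bits 7, 6, 5, 1 and 0 are meaningful in cdb[1]; a set bit
 * outside the mask can then be flagged as an invalid field in the cdb
 * (see the strict parameter).
 */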

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_READ_BLOCK_LIMITS = 33,
	SDEB_I_LOCATE = 34,
	SDEB_I_WRITE_FILEMARKS = 35,
	SDEB_I_SPACE = 36,
	SDEB_I_FORMAT_MEDIUM = 37,
	SDEB_I_ERASE = 38,
	SDEB_I_LAST_ELEM_P1 = 39,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
	    SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
	0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
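
/*
 * Sketch of the intended use (a hypothetical response function handling
 * a cdb with the IMMED bit set can complete early):
 *
 *	return res | SDEG_RES_IMMED_MASK;
 *
 * so the deferral code can shorten the simulated delay for that command.
 */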

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
	    {6,  0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */
	    NULL, {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
	{0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
	    {10,  0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },				/* READ POSITION (10) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL,      /* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
						/* READ POSITION (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
	{0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
	{0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL,    /* READ BLOCK LIMITS (6) */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL,	   /* LOCATE (10) */
	    {10,  0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL,   /* WRITE FILEMARKS (6) */
	    {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL,    /* SPACE (6) */
	    {6,  0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL,  /* FORMAT MEDIUM (6) */
	    {6,  0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL,    /* ERASE (6) */
	    {6,  0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 39 */
/* sentinel */
	{0xff, 0, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */
/* Old BIOS-style geometry; the kernel may eventually drop these, but some
   mode sense pages may still need them. */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface. */

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
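
/*
 * These follow the SCSI mid-level's packed result format: the low byte
 * carries the SAM status (e.g. SAM_STAT_CHECK_CONDITION) and bits 16-23
 * carry the host byte (e.g. DID_ABORT << 16), so one int conveys both
 * the transport outcome and the device status.
 */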

static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);

static u32 sdebug_get_devsel(struct scsi_device *sdp)
{
	unsigned char devtype = sdp->type;
	u32 devsel;

	if (devtype < 32)
		devsel = (1 << devtype);
	else
		devsel = DS_ALL;

	return devsel;
}

static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
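
/*
 * Note on the pattern above: readers traverse inject_err_list under
 * rcu_read_lock() (see sdebug_error_show() below), so a superseded entry
 * is unlinked with list_del_rcu() and freed only after a grace period
 * via call_rcu(), while writers serialize against each other with
 * devip->list_lock.
 */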

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}

static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
		break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
		break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
		break;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
	break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
	break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
	break;

	default:
		goto out_error;
	break;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};

static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};

static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}
static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_target_cleanup_async, targetip,
				&sdebug_async_domain);
	}
}

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static inline bool scsi_debug_atomic_write(void)
{
	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
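
/*
 * The do_div() above reduces the LBA modulo sdebug_store_sectors, so
 * when the advertised capacity exceeds the RAM store (e.g. when the
 * virtual_gb parameter inflates sdebug_capacity) distinct LBAs alias
 * onto the same backing sector; that is acceptable for a test device.
 */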
1336 
dif_store(struct sdeb_store_info * sip,sector_t sector)1337 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1338 				      sector_t sector)
1339 {
1340 	sector = sector_div(sector, sdebug_store_sectors);
1341 
1342 	return sip->dif_storep + sector;
1343 }
1344 
sdebug_max_tgts_luns(void)1345 static void sdebug_max_tgts_luns(void)
1346 {
1347 	struct sdebug_host_info *sdbg_host;
1348 	struct Scsi_Host *hpnt;
1349 
1350 	mutex_lock(&sdebug_host_list_mutex);
1351 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1352 		hpnt = sdbg_host->shost;
1353 		if ((hpnt->this_id >= 0) &&
1354 		    (sdebug_num_tgts > hpnt->this_id))
1355 			hpnt->max_id = sdebug_num_tgts + 1;
1356 		else
1357 			hpnt->max_id = sdebug_num_tgts;
1358 		/* sdebug_max_luns; */
1359 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1360 	}
1361 	mutex_unlock(&sdebug_host_list_mutex);
1362 }
1363 
1364 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1365 
1366 /* Set in_bit to -1 to indicate no bit position of invalid field */
mk_sense_invalid_fld(struct scsi_cmnd * scp,enum sdeb_cmd_data c_d,int in_byte,int in_bit)1367 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1368 				 enum sdeb_cmd_data c_d,
1369 				 int in_byte, int in_bit)
1370 {
1371 	unsigned char *sbuff;
1372 	u8 sks[4];
1373 	int sl, asc;
1374 
1375 	sbuff = scp->sense_buffer;
1376 	if (!sbuff) {
1377 		sdev_printk(KERN_ERR, scp->device,
1378 			    "%s: sense_buffer is NULL\n", __func__);
1379 		return;
1380 	}
1381 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1382 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1383 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1384 	memset(sks, 0, sizeof(sks));
1385 	sks[0] = 0x80;
1386 	if (c_d)
1387 		sks[0] |= 0x40;
1388 	if (in_bit >= 0) {
1389 		sks[0] |= 0x8;
1390 		sks[0] |= 0x7 & in_bit;
1391 	}
1392 	put_unaligned_be16(in_byte, sks + 1);
1393 	if (sdebug_dsense) {
1394 		sl = sbuff[7] + 8;
1395 		sbuff[7] = sl;
1396 		sbuff[sl] = 0x2;
1397 		sbuff[sl + 1] = 0x6;
1398 		memcpy(sbuff + sl + 4, sks, 3);
1399 	} else
1400 		memcpy(sbuff + 15, sks, 3);
1401 	if (sdebug_verbose)
1402 		sdev_printk(KERN_INFO, scp->device,
1403 			    "%s:  [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1404 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1405 }
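
/*
 * Worked example of the SKS encoding above (fixed format, dsense=0):
 * flagging CDB byte 2, bit 6 gives sks[0] = 0x80 (SKSV) | 0x40 (C/D) |
 * 0x08 (BPV) | 0x6 = 0xce and sks[1..2] = 0x00 0x02, so bytes 15..17
 * of the sense buffer (SENSE KEY SPECIFIC) read 0xce 0x00 0x02.
 */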
1406 
1407 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1408 {
1409 	if (!scp->sense_buffer) {
1410 		sdev_printk(KERN_ERR, scp->device,
1411 			    "%s: sense_buffer is NULL\n", __func__);
1412 		return;
1413 	}
1414 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1415 
1416 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1417 
1418 	if (sdebug_verbose)
1419 		sdev_printk(KERN_INFO, scp->device,
1420 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1421 			    my_name, key, asc, asq);
1422 }
1423 
1424 /* Sense data that has information fields for tapes */
1425 static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
1426 			unsigned int information, unsigned char tape_flags)
1427 {
1428 	if (!scp->sense_buffer) {
1429 		sdev_printk(KERN_ERR, scp->device,
1430 			    "%s: sense_buffer is NULL\n", __func__);
1431 		return;
1432 	}
1433 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1434 
1435 	scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
1436 	/* only fixed format so far */
1437 
1438 	scp->sense_buffer[0] |= 0x80; /* valid */
1439 	scp->sense_buffer[2] |= tape_flags;
1440 	put_unaligned_be32(information, &scp->sense_buffer[3]);
1441 
1442 	if (sdebug_verbose)
1443 		sdev_printk(KERN_INFO, scp->device,
1444 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1445 			    my_name, key, asc, asq);
1446 }
1447 
1448 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1449 {
1450 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1451 }
1452 
1453 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1454 			    void __user *arg)
1455 {
1456 	if (sdebug_verbose) {
1457 		if (0x1261 == cmd)
1458 			sdev_printk(KERN_INFO, dev,
1459 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1460 		else if (0x5331 == cmd)
1461 			sdev_printk(KERN_INFO, dev,
1462 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1463 				    __func__);
1464 		else
1465 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1466 				    __func__, cmd);
1467 	}
1468 	return -EINVAL;
1469 	/* return -ENOTTY; // correct return but upsets fdisk */
1470 }
1471 
1472 static void config_cdb_len(struct scsi_device *sdev)
1473 {
1474 	switch (sdebug_cdb_len) {
1475 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1476 		sdev->use_10_for_rw = false;
1477 		sdev->use_16_for_rw = false;
1478 		sdev->use_10_for_ms = false;
1479 		break;
1480 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1481 		sdev->use_10_for_rw = true;
1482 		sdev->use_16_for_rw = false;
1483 		sdev->use_10_for_ms = false;
1484 		break;
1485 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1486 		sdev->use_10_for_rw = true;
1487 		sdev->use_16_for_rw = false;
1488 		sdev->use_10_for_ms = true;
1489 		break;
1490 	case 16:
1491 		sdev->use_10_for_rw = false;
1492 		sdev->use_16_for_rw = true;
1493 		sdev->use_10_for_ms = true;
1494 		break;
1495 	case 32: /* No knobs to suggest this so same as 16 for now */
1496 		sdev->use_10_for_rw = false;
1497 		sdev->use_16_for_rw = true;
1498 		sdev->use_10_for_ms = true;
1499 		break;
1500 	default:
1501 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1502 			sdebug_cdb_len);
1503 		sdev->use_10_for_rw = true;
1504 		sdev->use_16_for_rw = false;
1505 		sdev->use_10_for_ms = false;
1506 		sdebug_cdb_len = 10;
1507 		break;
1508 	}
1509 }
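
/*
 * E.g. loading the module with cdb_len=16 selects the "case 16"
 * settings above, so sd issues READ(16)/WRITE(16) and MODE
 * SENSE(10)/MODE SELECT(10) to these devices.
 */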
1510 
1511 static void all_config_cdb_len(void)
1512 {
1513 	struct sdebug_host_info *sdbg_host;
1514 	struct Scsi_Host *shost;
1515 	struct scsi_device *sdev;
1516 
1517 	mutex_lock(&sdebug_host_list_mutex);
1518 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1519 		shost = sdbg_host->shost;
1520 		shost_for_each_device(sdev, shost) {
1521 			config_cdb_len(sdev);
1522 		}
1523 	}
1524 	mutex_unlock(&sdebug_host_list_mutex);
1525 }
1526 
1527 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1528 {
1529 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1530 	struct sdebug_dev_info *dp;
1531 
1532 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1533 		if ((devip->sdbg_host == dp->sdbg_host) &&
1534 		    (devip->target == dp->target)) {
1535 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1536 		}
1537 	}
1538 }
1539 
1540 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1541 {
1542 	int k;
1543 
1544 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1545 	if (k != SDEBUG_NUM_UAS) {
1546 		const char *cp = NULL;
1547 
1548 		switch (k) {
1549 		case SDEBUG_UA_POR:
1550 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1551 					POWER_ON_RESET_ASCQ);
1552 			if (sdebug_verbose)
1553 				cp = "power on reset";
1554 			break;
1555 		case SDEBUG_UA_POOCCUR:
1556 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1557 					POWER_ON_OCCURRED_ASCQ);
1558 			if (sdebug_verbose)
1559 				cp = "power on occurred";
1560 			break;
1561 		case SDEBUG_UA_BUS_RESET:
1562 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1563 					BUS_RESET_ASCQ);
1564 			if (sdebug_verbose)
1565 				cp = "bus reset";
1566 			break;
1567 		case SDEBUG_UA_MODE_CHANGED:
1568 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1569 					MODE_CHANGED_ASCQ);
1570 			if (sdebug_verbose)
1571 				cp = "mode parameters changed";
1572 			break;
1573 		case SDEBUG_UA_CAPACITY_CHANGED:
1574 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1575 					CAPACITY_CHANGED_ASCQ);
1576 			if (sdebug_verbose)
1577 				cp = "capacity data changed";
1578 			break;
1579 		case SDEBUG_UA_MICROCODE_CHANGED:
1580 			mk_sense_buffer(scp, UNIT_ATTENTION,
1581 					TARGET_CHANGED_ASC,
1582 					MICROCODE_CHANGED_ASCQ);
1583 			if (sdebug_verbose)
1584 				cp = "microcode has been changed";
1585 			break;
1586 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1587 			mk_sense_buffer(scp, UNIT_ATTENTION,
1588 					TARGET_CHANGED_ASC,
1589 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1590 			if (sdebug_verbose)
1591 				cp = "microcode has been changed without reset";
1592 			break;
1593 		case SDEBUG_UA_LUNS_CHANGED:
1594 			/*
1595 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1596 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1597 			 * on the target, until a REPORT LUNS command is
1598 			 * received.  SPC-4 behavior is to report it only once.
1599 			 * NOTE:  sdebug_scsi_level does not use the same
1600 			 * values as struct scsi_device->scsi_level.
1601 			 */
1602 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1603 				clear_luns_changed_on_target(devip);
1604 			mk_sense_buffer(scp, UNIT_ATTENTION,
1605 					TARGET_CHANGED_ASC,
1606 					LUNS_CHANGED_ASCQ);
1607 			if (sdebug_verbose)
1608 				cp = "reported luns data has changed";
1609 			break;
1610 		case SDEBUG_UA_NOT_READY_TO_READY:
1611 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
1612 					0);
1613 			if (sdebug_verbose)
1614 				cp = "not ready to ready transition/media change";
1615 			break;
1616 		default:
1617 			pr_warn("unexpected unit attention code=%d\n", k);
1618 			if (sdebug_verbose)
1619 				cp = "unknown";
1620 			break;
1621 		}
1622 		clear_bit(k, devip->uas_bm);
1623 		if (sdebug_verbose)
1624 			sdev_printk(KERN_INFO, scp->device,
1625 				   "%s reports: Unit attention: %s\n",
1626 				   my_name, cp);
1627 		return check_condition_result;
1628 	}
1629 	return 0;
1630 }
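
/*
 * Only one unit attention is reported per command: find_first_bit()
 * picks the lowest-numbered pending UA in uas_bm (bit ordering follows
 * the SDEBUG_UA_* enum, so e.g. a pending power-on reset is surfaced
 * before a capacity change), and that bit is cleared so the next
 * command sees the next pending UA, one CHECK CONDITION at a time.
 */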
1631 
1632 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1633 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1634 				int arr_len)
1635 {
1636 	int act_len;
1637 	struct scsi_data_buffer *sdb = &scp->sdb;
1638 
1639 	if (!sdb->length)
1640 		return 0;
1641 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1642 		return DID_ERROR << 16;
1643 
1644 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1645 				      arr, arr_len);
1646 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1647 
1648 	return 0;
1649 }
1650 
1651 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1652  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1653  * calls, not required to write in ascending offset order. Assumes resid
1654  * set to scsi_bufflen() prior to any calls.
1655  */
1656 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1657 				  int arr_len, unsigned int off_dst)
1658 {
1659 	unsigned int act_len, n;
1660 	struct scsi_data_buffer *sdb = &scp->sdb;
1661 	off_t skip = off_dst;
1662 
1663 	if (sdb->length <= off_dst)
1664 		return 0;
1665 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1666 		return DID_ERROR << 16;
1667 
1668 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1669 				       arr, arr_len, skip);
1670 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1671 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1672 		 scsi_get_resid(scp));
1673 	n = scsi_bufflen(scp) - (off_dst + act_len);
1674 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1675 	return 0;
1676 }
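
/*
 * Example of the resid update above: with scsi_bufflen() == 512, a call
 * that writes 64 bytes at off_dst=128 computes n = 512 - (128 + 64) =
 * 320; min_t() ensures resid only ever shrinks, so out-of-order partial
 * fills cannot grow the residual count.
 */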
1677 
1678 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1679  * 'arr' or -1 if error.
1680  */
1681 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1682 			       int arr_len)
1683 {
1684 	if (!scsi_bufflen(scp))
1685 		return 0;
1686 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1687 		return -1;
1688 
1689 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1690 }
1691 
1692 
1693 static char sdebug_inq_vendor_id[9] = "Linux   ";
1694 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1695 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1696 /* Use some locally assigned NAAs for SAS addresses. */
1697 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1698 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1699 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1700 
1701 /* Device identification VPD page. Returns number of bytes placed in arr */
1702 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1703 			  int target_dev_id, int dev_id_num,
1704 			  const char *dev_id_str, int dev_id_str_len,
1705 			  const uuid_t *lu_name)
1706 {
1707 	int num, port_a;
1708 	char b[32];
1709 
1710 	port_a = target_dev_id + 1;
1711 	/* T10 vendor identifier field format (faked) */
1712 	arr[0] = 0x2;	/* ASCII */
1713 	arr[1] = 0x1;
1714 	arr[2] = 0x0;
1715 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1716 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1717 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1718 	num = 8 + 16 + dev_id_str_len;
1719 	arr[3] = num;
1720 	num += 4;
1721 	if (dev_id_num >= 0) {
1722 		if (sdebug_uuid_ctl) {
1723 			/* Locally assigned UUID */
1724 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1725 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1726 			arr[num++] = 0x0;
1727 			arr[num++] = 0x12;
1728 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1729 			arr[num++] = 0x0;
1730 			memcpy(arr + num, lu_name, 16);
1731 			num += 16;
1732 		} else {
1733 			/* NAA-3, Logical unit identifier (binary) */
1734 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1735 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1736 			arr[num++] = 0x0;
1737 			arr[num++] = 0x8;
1738 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1739 			num += 8;
1740 		}
1741 		/* Target relative port number */
1742 		arr[num++] = 0x61;	/* proto=sas, binary */
1743 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1744 		arr[num++] = 0x0;	/* reserved */
1745 		arr[num++] = 0x4;	/* length */
1746 		arr[num++] = 0x0;	/* reserved */
1747 		arr[num++] = 0x0;	/* reserved */
1748 		arr[num++] = 0x0;
1749 		arr[num++] = 0x1;	/* relative port A */
1750 	}
1751 	/* NAA-3, Target port identifier */
1752 	arr[num++] = 0x61;	/* proto=sas, binary */
1753 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1754 	arr[num++] = 0x0;
1755 	arr[num++] = 0x8;
1756 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1757 	num += 8;
1758 	/* NAA-3, Target port group identifier */
1759 	arr[num++] = 0x61;	/* proto=sas, binary */
1760 	arr[num++] = 0x95;	/* piv=1, target port group id */
1761 	arr[num++] = 0x0;
1762 	arr[num++] = 0x4;
1763 	arr[num++] = 0;
1764 	arr[num++] = 0;
1765 	put_unaligned_be16(port_group_id, arr + num);
1766 	num += 2;
1767 	/* NAA-3, Target device identifier */
1768 	arr[num++] = 0x61;	/* proto=sas, binary */
1769 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1770 	arr[num++] = 0x0;
1771 	arr[num++] = 0x8;
1772 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1773 	num += 8;
1774 	/* SCSI name string: Target device identifier */
1775 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1776 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1777 	arr[num++] = 0x0;
1778 	arr[num++] = 24;
1779 	memcpy(arr + num, "naa.32222220", 12);
1780 	num += 12;
1781 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1782 	memcpy(arr + num, b, 8);
1783 	num += 8;
1784 	memset(arr + num, 0, 4);
1785 	num += 4;
1786 	return num;
1787 }
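
/*
 * Each designation descriptor above begins with a two-byte header,
 * e.g. 0x61 = protocol identifier 6 (SAS) in the upper nibble, code
 * set 1 (binary) in the lower; 0x93 = PIV=1, association 01b (target
 * port), designator type 3 (NAA).
 */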
1788 
1789 static unsigned char vpd84_data[] = {
1790 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1791     0x22,0x22,0x22,0x0,0xbb,0x1,
1792     0x22,0x22,0x22,0x0,0xbb,0x2,
1793 };
1794 
1795 /*  Software interface identification VPD page */
1796 static int inquiry_vpd_84(unsigned char *arr)
1797 {
1798 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1799 	return sizeof(vpd84_data);
1800 }
1801 
1802 /* Management network addresses VPD page */
1803 static int inquiry_vpd_85(unsigned char *arr)
1804 {
1805 	int num = 0;
1806 	const char *na1 = "https://www.kernel.org/config";
1807 	const char *na2 = "http://www.kernel.org/log";
1808 	int plen, olen;
1809 
1810 	arr[num++] = 0x1;	/* lu, storage config */
1811 	arr[num++] = 0x0;	/* reserved */
1812 	arr[num++] = 0x0;
1813 	olen = strlen(na1);
1814 	plen = olen + 1;
1815 	if (plen % 4)
1816 		plen = ((plen / 4) + 1) * 4;
1817 	arr[num++] = plen;	/* length, null terminated, padded */
1818 	memcpy(arr + num, na1, olen);
1819 	memset(arr + num + olen, 0, plen - olen);
1820 	num += plen;
1821 
1822 	arr[num++] = 0x4;	/* lu, logging */
1823 	arr[num++] = 0x0;	/* reserved */
1824 	arr[num++] = 0x0;
1825 	olen = strlen(na2);
1826 	plen = olen + 1;
1827 	if (plen % 4)
1828 		plen = ((plen / 4) + 1) * 4;
1829 	arr[num++] = plen;	/* length, null terminated, padded */
1830 	memcpy(arr + num, na2, olen);
1831 	memset(arr + num + olen, 0, plen - olen);
1832 	num += plen;
1833 
1834 	return num;
1835 }
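
/*
 * The rounding above pads each address to a multiple of four bytes:
 * na1 is 29 characters, so plen = 30 rounds up to 32 and the string
 * is NUL terminated then zero padded to that length.
 */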
1836 
1837 /* SCSI ports VPD page */
1838 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1839 {
1840 	int num = 0;
1841 	int port_a, port_b;
1842 
1843 	port_a = target_dev_id + 1;
1844 	port_b = port_a + 1;
1845 	arr[num++] = 0x0;	/* reserved */
1846 	arr[num++] = 0x0;	/* reserved */
1847 	arr[num++] = 0x0;
1848 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1849 	memset(arr + num, 0, 6);
1850 	num += 6;
1851 	arr[num++] = 0x0;
1852 	arr[num++] = 12;	/* length tp descriptor */
1853 	/* naa-5 target port identifier (A) */
1854 	arr[num++] = 0x61;	/* proto=sas, binary */
1855 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1856 	arr[num++] = 0x0;	/* reserved */
1857 	arr[num++] = 0x8;	/* length */
1858 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1859 	num += 8;
1860 	arr[num++] = 0x0;	/* reserved */
1861 	arr[num++] = 0x0;	/* reserved */
1862 	arr[num++] = 0x0;
1863 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1864 	memset(arr + num, 0, 6);
1865 	num += 6;
1866 	arr[num++] = 0x0;
1867 	arr[num++] = 12;	/* length tp descriptor */
1868 	/* naa-5 target port identifier (B) */
1869 	arr[num++] = 0x61;	/* proto=sas, binary */
1870 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1871 	arr[num++] = 0x0;	/* reserved */
1872 	arr[num++] = 0x8;	/* length */
1873 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1874 	num += 8;
1875 
1876 	return num;
1877 }
1878 
1879 
1880 static unsigned char vpd89_data[] = {
1881 /* from 4th byte */ 0,0,0,0,
1882 'l','i','n','u','x',' ',' ',' ',
1883 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1884 '1','2','3','4',
1885 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1886 0xec,0,0,0,
1887 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1888 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1889 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1890 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1891 0x53,0x41,
1892 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1893 0x20,0x20,
1894 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1895 0x10,0x80,
1896 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1897 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1898 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1899 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1900 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1901 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1902 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1903 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1904 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1905 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1906 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1907 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1908 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1909 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1910 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1911 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1912 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1913 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1914 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1915 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1916 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1917 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1918 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1919 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1920 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1921 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1922 };
1923 
1924 /* ATA Information VPD page */
1925 static int inquiry_vpd_89(unsigned char *arr)
1926 {
1927 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1928 	return sizeof(vpd89_data);
1929 }
1930 
1931 
1932 static unsigned char vpdb0_data[] = {
1933 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1934 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1935 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1936 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1937 };
1938 
1939 /* Block limits VPD page (SBC-3) */
1940 static int inquiry_vpd_b0(unsigned char *arr)
1941 {
1942 	unsigned int gran;
1943 
1944 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1945 
1946 	/* Optimal transfer length granularity */
1947 	if (sdebug_opt_xferlen_exp != 0 &&
1948 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1949 		gran = 1 << sdebug_opt_xferlen_exp;
1950 	else
1951 		gran = 1 << sdebug_physblk_exp;
1952 	put_unaligned_be16(gran, arr + 2);
1953 
1954 	/* Maximum Transfer Length */
1955 	if (sdebug_store_sectors > 0x400)
1956 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1957 
1958 	/* Optimal Transfer Length */
1959 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1960 
1961 	if (sdebug_lbpu) {
1962 		/* Maximum Unmap LBA Count */
1963 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1964 
1965 		/* Maximum Unmap Block Descriptor Count */
1966 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1967 	}
1968 
1969 	/* Unmap Granularity Alignment */
1970 	if (sdebug_unmap_alignment) {
1971 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1972 		arr[28] |= 0x80; /* UGAVALID */
1973 	}
1974 
1975 	/* Optimal Unmap Granularity */
1976 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1977 
1978 	/* Maximum WRITE SAME Length */
1979 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1980 
1981 	if (sdebug_atomic_wr) {
1982 		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1983 		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1984 		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1985 		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1986 		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1987 	}
1988 
1989 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1990 }
1991 
1992 /* Block device characteristics VPD page (SBC-3) */
1993 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1994 {
1995 	memset(arr, 0, 0x3c);
1996 	arr[0] = 0;
1997 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1998 	arr[2] = 0;
1999 	arr[3] = 5;	/* less than 1.8" */
2000 
2001 	return 0x3c;
2002 }
2003 
2004 /* Logical block provisioning VPD page (SBC-4) */
2005 static int inquiry_vpd_b2(unsigned char *arr)
2006 {
2007 	memset(arr, 0, 0x4);
2008 	arr[0] = 0;			/* threshold exponent */
2009 	if (sdebug_lbpu)
2010 		arr[1] = 1 << 7;
2011 	if (sdebug_lbpws)
2012 		arr[1] |= 1 << 6;
2013 	if (sdebug_lbpws10)
2014 		arr[1] |= 1 << 5;
2015 	if (sdebug_lbprz && scsi_debug_lbp())
2016 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
2017 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
2018 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
2019 	/* threshold_percentage=0 */
2020 	return 0x4;
2021 }
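
/*
 * Example: loading with lbpu=1 lbprz=1 (and the default fake_rw=0)
 * yields arr[1] = 0x80 | (1 << 2) = 0x84, advertising that unmapped
 * blocks read as zeros; lbprz=2 would give 0x88, the "provisioning
 * initialization pattern" variant from sbc4r07.
 */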
2022 
2023 /* Zoned block device characteristics VPD page (ZBC mandatory) */
2024 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
2025 {
2026 	memset(arr, 0, 0x3c);
2027 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
2028 	/*
2029 	 * Set Optimal number of open sequential write preferred zones and
2030 	 * Optimal number of non-sequentially written sequential write
2031 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
2032 	 * fields set to zero, apart from Max. number of open swrz_s field.
2033 	 */
2034 	put_unaligned_be32(0xffffffff, &arr[4]);
2035 	put_unaligned_be32(0xffffffff, &arr[8]);
2036 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
2037 		put_unaligned_be32(devip->max_open, &arr[12]);
2038 	else
2039 		put_unaligned_be32(0xffffffff, &arr[12]);
2040 	if (devip->zcap < devip->zsize) {
2041 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
2042 		put_unaligned_be64(devip->zsize, &arr[20]);
2043 	} else {
2044 		arr[19] = 0;
2045 	}
2046 	return 0x3c;
2047 }
2048 
2049 #define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */
2050 
2051 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
2052 
2053 /* Block limits extension VPD page (SBC-4) */
2054 static int inquiry_vpd_b7(unsigned char *arrb4)
2055 {
2056 	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
2057 	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
2058 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
2059 	return SDEBUG_BLE_LEN_AFTER_B4;
2060 }
2061 
2062 #define SDEBUG_LONG_INQ_SZ 96
2063 #define SDEBUG_MAX_INQ_ARR_SZ 584
2064 
2065 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2066 {
2067 	unsigned char pq_pdt;
2068 	unsigned char *arr;
2069 	unsigned char *cmd = scp->cmnd;
2070 	u32 alloc_len, n;
2071 	int ret;
2072 	bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;
2073 
2074 	alloc_len = get_unaligned_be16(cmd + 3);
2075 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
2076 	if (!arr)
2077 		return DID_REQUEUE << 16;
2078 	if (scp->device->type >= 32) {
2079 		is_disk = (sdebug_ptype == TYPE_DISK);
2080 		is_tape = (sdebug_ptype == TYPE_TAPE);
2081 	} else {
2082 		is_disk = (scp->device->type == TYPE_DISK);
2083 		is_tape = (scp->device->type == TYPE_TAPE);
2084 	}
2085 	is_zbc = devip->zoned;
2086 	is_disk_zbc = (is_disk || is_zbc);
2087 	have_wlun = scsi_is_wlun(scp->device->lun);
2088 	if (have_wlun)
2089 		pq_pdt = TYPE_WLUN;	/* present, wlun */
2090 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
2091 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
2092 	else
2093 		pq_pdt = ((scp->device->type >= 32 ?
2094 				sdebug_ptype : scp->device->type) & 0x1f);
2095 	arr[0] = pq_pdt;
2096 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
2097 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
2098 		kfree(arr);
2099 		return check_condition_result;
2100 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
2101 		int lu_id_num, port_group_id, target_dev_id;
2102 		u32 len;
2103 		char lu_id_str[6];
2104 		int host_no = devip->sdbg_host->shost->host_no;
2105 
2106 		arr[1] = cmd[2];
2107 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
2108 		    (devip->channel & 0x7f);
2109 		if (sdebug_vpd_use_hostno == 0)
2110 			host_no = 0;
2111 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
2112 			    (devip->target * 1000) + devip->lun);
2113 		target_dev_id = ((host_no + 1) * 2000) +
2114 				 (devip->target * 1000) - 3;
2115 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
2116 		if (0 == cmd[2]) { /* supported vital product data pages */
2117 			n = 4;
2118 			arr[n++] = 0x0;   /* this page */
2119 			arr[n++] = 0x80;  /* unit serial number */
2120 			arr[n++] = 0x83;  /* device identification */
2121 			arr[n++] = 0x84;  /* software interface ident. */
2122 			arr[n++] = 0x85;  /* management network addresses */
2123 			arr[n++] = 0x86;  /* extended inquiry */
2124 			arr[n++] = 0x87;  /* mode page policy */
2125 			arr[n++] = 0x88;  /* SCSI ports */
2126 			if (is_disk_zbc) {	  /* SBC or ZBC */
2127 				arr[n++] = 0x89;  /* ATA information */
2128 				arr[n++] = 0xb0;  /* Block limits */
2129 				arr[n++] = 0xb1;  /* Block characteristics */
2130 				if (is_disk)
2131 					arr[n++] = 0xb2;  /* LB Provisioning */
2132 				if (is_zbc)
2133 					arr[n++] = 0xb6;  /* ZB dev. char. */
2134 				arr[n++] = 0xb7;  /* Block limits extension */
2135 			}
2136 			arr[3] = n - 4;	  /* number of supported VPD pages */
2137 		} else if (0x80 == cmd[2]) { /* unit serial number */
2138 			arr[3] = len;
2139 			memcpy(&arr[4], lu_id_str, len);
2140 		} else if (0x83 == cmd[2]) { /* device identification */
2141 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2142 						target_dev_id, lu_id_num,
2143 						lu_id_str, len,
2144 						&devip->lu_name);
2145 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
2146 			arr[3] = inquiry_vpd_84(&arr[4]);
2147 		} else if (0x85 == cmd[2]) { /* Management network addresses */
2148 			arr[3] = inquiry_vpd_85(&arr[4]);
2149 		} else if (0x86 == cmd[2]) { /* extended inquiry */
2150 			arr[3] = 0x3c;	/* number of following entries */
2151 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2152 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
2153 			else if (have_dif_prot)
2154 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
2155 			else
2156 				arr[4] = 0x0;   /* no protection stuff */
2157 			/*
2158 			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2159 			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2160 			 */
2161 			arr[5] = 0x17;
2162 		} else if (0x87 == cmd[2]) { /* mode page policy */
2163 			arr[3] = 0x8;	/* number of following entries */
2164 			arr[4] = 0x2;	/* disconnect-reconnect mp */
2165 			arr[6] = 0x80;	/* mlus, shared */
2166 			arr[8] = 0x18;	 /* protocol specific lu */
2167 			arr[10] = 0x82;	 /* mlus, per initiator port */
2168 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
2169 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2170 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2171 			n = inquiry_vpd_89(&arr[4]);
2172 			put_unaligned_be16(n, arr + 2);
2173 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2174 			arr[3] = inquiry_vpd_b0(&arr[4]);
2175 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2176 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2177 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2178 			arr[3] = inquiry_vpd_b2(&arr[4]);
2179 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2180 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2181 		} else if (cmd[2] == 0xb7) { /* block limits extension page */
2182 			arr[3] = inquiry_vpd_b7(&arr[4]);
2183 		} else {
2184 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2185 			kfree(arr);
2186 			return check_condition_result;
2187 		}
2188 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2189 		ret = fill_from_dev_buffer(scp, arr,
2190 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2191 		kfree(arr);
2192 		return ret;
2193 	}
2194 	/* drops through here for a standard inquiry */
2195 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2196 	arr[2] = sdebug_scsi_level;
2197 	arr[3] = 2;    /* response_data_format==2 */
2198 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2199 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2200 	if (sdebug_vpd_use_hostno == 0)
2201 		arr[5] |= 0x10; /* claim: implicit TPGS */
2202 	arr[6] = 0x10; /* claim: MultiP */
2203 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2204 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2205 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2206 	memcpy(&arr[16], sdebug_inq_product_id, 16);
2207 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2208 	/* Use Vendor Specific area to place driver date in ASCII hex */
2209 	memcpy(&arr[36], sdebug_version_date, 8);
2210 	/* version descriptors (2 bytes each) follow */
2211 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2212 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2213 	n = 62;
2214 	if (is_disk) {		/* SBC-4 no version claimed */
2215 		put_unaligned_be16(0x600, arr + n);
2216 		n += 2;
2217 	} else if (is_tape) {	/* SSC-4 rev 3 */
2218 		put_unaligned_be16(0x525, arr + n);
2219 		n += 2;
2220 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2221 		put_unaligned_be16(0x624, arr + n);
2222 		n += 2;
2223 	}
2224 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2225 	ret = fill_from_dev_buffer(scp, arr,
2226 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2227 	kfree(arr);
2228 	return ret;
2229 }
2230 
2231 /* See resp_iec_m_pg() for how this data is manipulated */
2232 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2233 				   0, 0, 0x0, 0x0};
2234 
2235 static int resp_requests(struct scsi_cmnd *scp,
2236 			 struct sdebug_dev_info *devip)
2237 {
2238 	unsigned char *cmd = scp->cmnd;
2239 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2240 	bool dsense = !!(cmd[1] & 1);
2241 	u32 alloc_len = cmd[4];
2242 	u32 len = 18;
2243 	int stopped_state = atomic_read(&devip->stopped);
2244 
2245 	memset(arr, 0, sizeof(arr));
2246 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2247 		if (dsense) {
2248 			arr[0] = 0x72;
2249 			arr[1] = NOT_READY;
2250 			arr[2] = LOGICAL_UNIT_NOT_READY;
2251 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2252 			len = 8;
2253 		} else {
2254 			arr[0] = 0x70;
2255 			arr[2] = NOT_READY;		/* NOT READY in sense_key */
2256 			arr[7] = 0xa;			/* 18 byte sense buffer */
2257 			arr[12] = LOGICAL_UNIT_NOT_READY;
2258 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2259 		}
2260 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2261 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2262 		if (dsense) {
2263 			arr[0] = 0x72;
2264 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2265 			arr[2] = THRESHOLD_EXCEEDED;
2266 			arr[3] = 0xff;		/* Failure prediction(false) */
2267 			len = 8;
2268 		} else {
2269 			arr[0] = 0x70;
2270 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2271 			arr[7] = 0xa;   	/* 18 byte sense buffer */
2272 			arr[12] = THRESHOLD_EXCEEDED;
2273 			arr[13] = 0xff;		/* Failure prediction(false) */
2274 		}
2275 	} else {	/* nothing to report */
2276 		if (dsense) {
2277 			len = 8;
2278 			memset(arr, 0, len);
2279 			arr[0] = 0x72;
2280 		} else {
2281 			memset(arr, 0, len);
2282 			arr[0] = 0x70;
2283 			arr[7] = 0xa;
2284 		}
2285 	}
2286 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2287 }
2288 
2289 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2290 {
2291 	unsigned char *cmd = scp->cmnd;
2292 	int power_cond, want_stop, stopped_state;
2293 	bool changing;
2294 
2295 	power_cond = (cmd[4] & 0xf0) >> 4;
2296 	if (power_cond) {
2297 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2298 		return check_condition_result;
2299 	}
2300 	want_stop = !(cmd[4] & 1);
2301 	stopped_state = atomic_read(&devip->stopped);
2302 	if (stopped_state == 2) {
2303 		ktime_t now_ts = ktime_get_boottime();
2304 
2305 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2306 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2307 
2308 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2309 				/* tur_ms_to_ready timer extinguished */
2310 				atomic_set(&devip->stopped, 0);
2311 				stopped_state = 0;
2312 			}
2313 		}
2314 		if (stopped_state == 2) {
2315 			if (want_stop) {
2316 				stopped_state = 1;	/* dummy up success */
2317 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2318 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2319 				return check_condition_result;
2320 			}
2321 		}
2322 	}
2323 	changing = (stopped_state != want_stop);
2324 	if (changing)
2325 		atomic_xchg(&devip->stopped, want_stop);
2326 	if (scp->device->type == TYPE_TAPE && !want_stop) {
2327 		int i;
2328 
2329 		set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
2330 		for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
2331 			devip->tape_location[i] = 0;
2332 		devip->tape_partition = 0;
2333 	}
2334 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2335 		return SDEG_RES_IMMED_MASK;
2336 	else
2337 		return 0;
2338 }
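
/*
 * Example of the stopped_state == 2 path above: with tur_ms_to_ready=5000
 * the device stays "not ready" for the first five seconds after creation;
 * a START (cmd[4] bit 0 set) inside that window is rejected with
 * INVALID FIELD IN CDB, while a STOP is dummied up as success.
 */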
2339 
2340 static sector_t get_sdebug_capacity(void)
2341 {
2342 	static const unsigned int gibibyte = 1073741824;
2343 
2344 	if (sdebug_virtual_gb > 0)
2345 		return (sector_t)sdebug_virtual_gb *
2346 			(gibibyte / sdebug_sector_size);
2347 	else
2348 		return sdebug_store_sectors;
2349 }
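
/*
 * E.g. virtual_gb=4 with the default 512-byte sectors reports
 * 4 * (1073741824 / 512) = 8388608 sectors regardless of how much
 * ramdisk (dev_size_mb) actually backs the store; accesses beyond
 * the store then wrap via lba2fake_store().
 */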
2350 
2351 #define SDEBUG_READCAP_ARR_SZ 8
2352 static int resp_readcap(struct scsi_cmnd *scp,
2353 			struct sdebug_dev_info *devip)
2354 {
2355 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2356 	unsigned int capac;
2357 
2358 	/* following just in case virtual_gb changed */
2359 	sdebug_capacity = get_sdebug_capacity();
2360 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2361 	if (sdebug_capacity < 0xffffffff) {
2362 		capac = (unsigned int)sdebug_capacity - 1;
2363 		put_unaligned_be32(capac, arr + 0);
2364 	} else
2365 		put_unaligned_be32(0xffffffff, arr + 0);
2366 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2367 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2368 }
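
/*
 * Per SBC, when the capacity needs more than 32 bits the returned LBA
 * is pinned to 0xffffffff, telling the initiator to retry with
 * READ CAPACITY(16); e.g. virtual_gb=2048 at 512 bytes per sector
 * (2^32 sectors) takes that path.
 */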
2369 
2370 #define SDEBUG_READCAP16_ARR_SZ 32
2371 static int resp_readcap16(struct scsi_cmnd *scp,
2372 			  struct sdebug_dev_info *devip)
2373 {
2374 	unsigned char *cmd = scp->cmnd;
2375 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2376 	u32 alloc_len;
2377 
2378 	alloc_len = get_unaligned_be32(cmd + 10);
2379 	/* following just in case virtual_gb changed */
2380 	sdebug_capacity = get_sdebug_capacity();
2381 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2382 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2383 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2384 	arr[13] = sdebug_physblk_exp & 0xf;
2385 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2386 
2387 	if (scsi_debug_lbp()) {
2388 		arr[14] |= 0x80; /* LBPME */
2389 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2390 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2391 		 * in the wider field maps to 0 in this field.
2392 		 */
2393 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2394 			arr[14] |= 0x40;
2395 	}
2396 
2397 	/*
2398 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2399 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2400 	 */
2401 	if (devip->zoned)
2402 		arr[12] |= 1 << 4;
2403 
2404 	arr[15] = sdebug_lowest_aligned & 0xff;
2405 
2406 	if (have_dif_prot) {
2407 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2408 		arr[12] |= 1; /* PROT_EN */
2409 	}
2410 
2411 	return fill_from_dev_buffer(scp, arr,
2412 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2413 }
2414 
2415 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2416 
2417 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2418 			      struct sdebug_dev_info *devip)
2419 {
2420 	unsigned char *cmd = scp->cmnd;
2421 	unsigned char *arr;
2422 	int host_no = devip->sdbg_host->shost->host_no;
2423 	int port_group_a, port_group_b, port_a, port_b;
2424 	u32 alen, n, rlen;
2425 	int ret;
2426 
2427 	alen = get_unaligned_be32(cmd + 6);
2428 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2429 	if (!arr)
2430 		return DID_REQUEUE << 16;
2431 	/*
2432 	 * EVPD page 0x88 states we have two ports, one
2433 	 * real and a fake port with no device connected.
2434 	 * So we create two port groups with one port each
2435 	 * and set the group with port B to unavailable.
2436 	 */
2437 	port_a = 0x1; /* relative port A */
2438 	port_b = 0x2; /* relative port B */
2439 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2440 			(devip->channel & 0x7f);
2441 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2442 			(devip->channel & 0x7f) + 0x80;
2443 
2444 	/*
2445 	 * The asymmetric access state is cycled according to the host_id.
2446 	 */
2447 	n = 4;
2448 	if (sdebug_vpd_use_hostno == 0) {
2449 		arr[n++] = host_no % 3; /* Asymm access state */
2450 		arr[n++] = 0x0F; /* claim: all states are supported */
2451 	} else {
2452 		arr[n++] = 0x0; /* Active/Optimized path */
2453 		arr[n++] = 0x01; /* only support active/optimized paths */
2454 	}
2455 	put_unaligned_be16(port_group_a, arr + n);
2456 	n += 2;
2457 	arr[n++] = 0;    /* Reserved */
2458 	arr[n++] = 0;    /* Status code */
2459 	arr[n++] = 0;    /* Vendor unique */
2460 	arr[n++] = 0x1;  /* One port per group */
2461 	arr[n++] = 0;    /* Reserved */
2462 	arr[n++] = 0;    /* Reserved */
2463 	put_unaligned_be16(port_a, arr + n);
2464 	n += 2;
2465 	arr[n++] = 3;    /* Port unavailable */
2466 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2467 	put_unaligned_be16(port_group_b, arr + n);
2468 	n += 2;
2469 	arr[n++] = 0;    /* Reserved */
2470 	arr[n++] = 0;    /* Status code */
2471 	arr[n++] = 0;    /* Vendor unique */
2472 	arr[n++] = 0x1;  /* One port per group */
2473 	arr[n++] = 0;    /* Reserved */
2474 	arr[n++] = 0;    /* Reserved */
2475 	put_unaligned_be16(port_b, arr + n);
2476 	n += 2;
2477 
2478 	rlen = n - 4;
2479 	put_unaligned_be32(rlen, arr + 0);
2480 
2481 	/*
2482 	 * Return the smallest value of either
2483 	 * - The allocated length
2484 	 * - The constructed command length
2485 	 * - The maximum array size
2486 	 */
2487 	rlen = min(alen, n);
2488 	ret = fill_from_dev_buffer(scp, arr,
2489 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2490 	kfree(arr);
2491 	return ret;
2492 }
2493 
2494 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2495 			     struct sdebug_dev_info *devip)
2496 {
2497 	bool rctd;
2498 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2499 	u16 req_sa, u;
2500 	u32 alloc_len, a_len;
2501 	int k, offset, len, errsts, bump, na;
2502 	const struct opcode_info_t *oip;
2503 	const struct opcode_info_t *r_oip;
2504 	u8 *arr;
2505 	u8 *cmd = scp->cmnd;
2506 	u32 devsel = sdebug_get_devsel(scp->device);
2507 
2508 	rctd = !!(cmd[2] & 0x80);
2509 	reporting_opts = cmd[2] & 0x7;
2510 	req_opcode = cmd[3];
2511 	req_sa = get_unaligned_be16(cmd + 4);
2512 	alloc_len = get_unaligned_be32(cmd + 6);
2513 	if (alloc_len < 4 || alloc_len > 0xffff) {
2514 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2515 		return check_condition_result;
2516 	}
2517 	if (alloc_len > 8192)
2518 		a_len = 8192;
2519 	else
2520 		a_len = alloc_len;
2521 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2522 	if (NULL == arr) {
2523 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2524 				INSUFF_RES_ASCQ);
2525 		return check_condition_result;
2526 	}
2527 	switch (reporting_opts) {
2528 	case 0:	/* all commands */
2529 		bump = rctd ? 20 : 8;
2530 		for (offset = 4, oip = opcode_info_arr;
2531 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2532 			if (F_INV_OP & oip->flags)
2533 				continue;
2534 			if ((devsel & oip->devsel) != 0) {
2535 				arr[offset] = oip->opcode;
2536 				put_unaligned_be16(oip->sa, arr + offset + 2);
2537 				if (rctd)
2538 					arr[offset + 5] |= 0x2;
2539 				if (FF_SA & oip->flags)
2540 					arr[offset + 5] |= 0x1;
2541 				put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2542 				if (rctd)
2543 					put_unaligned_be16(0xa, arr + offset + 8);
2544 				offset += bump;
2545 			}
2546 			na = oip->num_attached;
2547 			r_oip = oip;
2548 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2549 				if (F_INV_OP & oip->flags)
2550 					continue;
2551 				if ((devsel & oip->devsel) == 0)
2552 					continue;
2553 				arr[offset] = oip->opcode;
2554 				put_unaligned_be16(oip->sa, arr + offset + 2);
2555 				if (rctd)
2556 					arr[offset + 5] |= 0x2;
2557 				if (FF_SA & oip->flags)
2558 					arr[offset + 5] |= 0x1;
2559 				put_unaligned_be16(oip->len_mask[0],
2560 						arr + offset + 6);
2561 				if (rctd)
2562 					put_unaligned_be16(0xa,
2563 							   arr + offset + 8);
2564 				offset += bump;
2565 			}
2566 			oip = r_oip;
2567 		}
2568 		put_unaligned_be32(offset - 4, arr);
2569 		break;
2570 	case 1:	/* one command: opcode only */
2571 	case 2:	/* one command: opcode plus service action */
2572 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2573 		sdeb_i = opcode_ind_arr[req_opcode];
2574 		oip = &opcode_info_arr[sdeb_i];
2575 		if (F_INV_OP & oip->flags) {
2576 			supp = 1;
2577 			offset = 4;
2578 		} else {
2579 			if (1 == reporting_opts) {
2580 				if (FF_SA & oip->flags) {
2581 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2582 							     2, 2);
2583 					kfree(arr);
2584 					return check_condition_result;
2585 				}
2586 				req_sa = 0;
2587 			} else if (2 == reporting_opts &&
2588 				   0 == (FF_SA & oip->flags)) {
2589 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* points at requested sa */
2590 				kfree(arr);
2591 				return check_condition_result;
2592 			}
2593 			if (0 == (FF_SA & oip->flags) &&
2594 				(devsel & oip->devsel) != 0 &&
2595 				req_opcode == oip->opcode)
2596 				supp = 3;
2597 			else if (0 == (FF_SA & oip->flags)) {
2598 				na = oip->num_attached;
2599 				for (k = 0, oip = oip->arrp; k < na;
2600 				     ++k, ++oip) {
2601 					if (req_opcode == oip->opcode &&
2602 						(devsel & oip->devsel) != 0)
2603 						break;
2604 				}
2605 				supp = (k >= na) ? 1 : 3;
2606 			} else if (req_sa != oip->sa) {
2607 				na = oip->num_attached;
2608 				for (k = 0, oip = oip->arrp; k < na;
2609 				     ++k, ++oip) {
2610 					if (req_sa == oip->sa &&
2611 						(devsel & oip->devsel) != 0)
2612 						break;
2613 				}
2614 				supp = (k >= na) ? 1 : 3;
2615 			} else
2616 				supp = 3;
2617 			if (3 == supp) {
2618 				u = oip->len_mask[0];
2619 				put_unaligned_be16(u, arr + 2);
2620 				arr[4] = oip->opcode;
2621 				for (k = 1; k < u; ++k)
2622 					arr[4 + k] = (k < 16) ?
2623 						 oip->len_mask[k] : 0xff;
2624 				offset = 4 + u;
2625 			} else
2626 				offset = 4;
2627 		}
2628 		arr[1] = (rctd ? 0x80 : 0) | supp;
2629 		if (rctd) {
2630 			put_unaligned_be16(0xa, arr + offset);
2631 			offset += 12;
2632 		}
2633 		break;
2634 	default:
2635 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2636 		kfree(arr);
2637 		return check_condition_result;
2638 	}
2639 	offset = (offset < a_len) ? offset : a_len;
2640 	len = (offset < alloc_len) ? offset : alloc_len;
2641 	errsts = fill_from_dev_buffer(scp, arr, len);
2642 	kfree(arr);
2643 	return errsts;
2644 }
2645 
2646 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2647 			  struct sdebug_dev_info *devip)
2648 {
2649 	bool repd;
2650 	u32 alloc_len, len;
2651 	u8 arr[16];
2652 	u8 *cmd = scp->cmnd;
2653 
2654 	memset(arr, 0, sizeof(arr));
2655 	repd = !!(cmd[2] & 0x80);
2656 	alloc_len = get_unaligned_be32(cmd + 6);
2657 	if (alloc_len < 4) {
2658 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2659 		return check_condition_result;
2660 	}
2661 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2662 	arr[1] = 0x1;		/* ITNRS */
2663 	if (repd) {
2664 		arr[3] = 0xc;
2665 		len = 16;
2666 	} else
2667 		len = 4;
2668 
2669 	len = (len < alloc_len) ? len : alloc_len;
2670 	return fill_from_dev_buffer(scp, arr, len);
2671 }
2672 
2673 /* <<Following mode page info copied from ST318451LW>> */
2674 
2675 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2676 {	/* Read-Write Error Recovery page for mode_sense */
2677 	static const unsigned char err_recov_pg[] = {
2678 		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2679 		5, 0, 0xff, 0xff
2680 	};
2681 
2682 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2683 	if (1 == pcontrol)
2684 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2685 	return sizeof(err_recov_pg);
2686 }
2687 
2688 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2689 { 	/* Disconnect-Reconnect page for mode_sense */
2690 	static const unsigned char disconnect_pg[] = {
2691 		0x2, 0xe, 128, 128, 0, 10, 0, 0,
2692 		0, 0, 0, 0, 0, 0, 0, 0
2693 	};
2694 
2695 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2696 	if (1 == pcontrol)
2697 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2698 	return sizeof(disconnect_pg);
2699 }
2700 
2701 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2702 {       /* Format device page for mode_sense */
2703 	static const unsigned char format_pg[] = {
2704 		0x3, 0x16, 0, 0, 0, 0, 0, 0,
2705 		0, 0, 0, 0, 0, 0, 0, 0,
2706 		0, 0, 0, 0, 0x40, 0, 0, 0
2707 	};
2708 
2709 	memcpy(p, format_pg, sizeof(format_pg));
2710 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2711 	put_unaligned_be16(sdebug_sector_size, p + 12);
2712 	if (sdebug_removable)
2713 		p[20] |= 0x20; /* should agree with INQUIRY */
2714 	if (1 == pcontrol)
2715 		memset(p + 2, 0, sizeof(format_pg) - 2);
2716 	return sizeof(format_pg);
2717 }
2718 
2719 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2720 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2721 				     0, 0, 0, 0};
2722 
2723 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2724 { 	/* Caching page for mode_sense */
2725 	static const unsigned char ch_caching_pg[] = {
2726 		/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2727 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2728 	};
2729 	static const unsigned char d_caching_pg[] = {
2730 		0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2731 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
2732 	};
2733 
2734 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2735 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2736 	memcpy(p, caching_pg, sizeof(caching_pg));
2737 	if (1 == pcontrol)
2738 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2739 	else if (2 == pcontrol)
2740 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2741 	return sizeof(caching_pg);
2742 }
2743 
2744 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2745 				    0, 0, 0x2, 0x4b};
2746 
2747 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2748 { 	/* Control mode page for mode_sense */
2749 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2750 					0, 0, 0, 0};
2751 	static const unsigned char d_ctrl_m_pg[] = {
2752 		0xa, 10, 2, 0, 0, 0, 0, 0,
2753 		0, 0, 0x2, 0x4b
2754 	};
2755 
2756 	if (sdebug_dsense)
2757 		ctrl_m_pg[2] |= 0x4;
2758 	else
2759 		ctrl_m_pg[2] &= ~0x4;
2760 
2761 	if (sdebug_ato)
2762 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2763 
2764 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2765 	if (1 == pcontrol)
2766 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2767 	else if (2 == pcontrol)
2768 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2769 	return sizeof(ctrl_m_pg);
2770 }
2771 
2772 /* IO Advice Hints Grouping mode page */
2773 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2774 {
2775 	/* IO Advice Hints Grouping mode page */
2776 	struct grouping_m_pg {
2777 		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
2778 		u8 subpage_code;
2779 		__be16 page_length;
2780 		u8 reserved[12];
2781 		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2782 	};
2783 	static const struct grouping_m_pg gr_m_pg = {
2784 		.page_code = 0xa | 0x40,
2785 		.subpage_code = 5,
2786 		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2787 		.descr = {
2788 			{ .st_enble = 1 },
2789 			{ .st_enble = 1 },
2790 			{ .st_enble = 1 },
2791 			{ .st_enble = 1 },
2792 			{ .st_enble = 1 },
2793 			{ .st_enble = 0 },
2794 		}
2795 	};
2796 
2797 	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2798 		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2799 	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2800 	if (1 == pcontrol) {
2801 		/* There are no changeable values so clear from byte 4 on. */
2802 		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2803 	}
2804 	return sizeof(gr_m_pg);
2805 }
2806 
2807 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2808 {	/* Informational Exceptions control mode page for mode_sense */
2809 	static const unsigned char ch_iec_m_pg[] = {
2810 		/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2811 		0, 0, 0x0, 0x0
2812 	};
2813 	static const unsigned char d_iec_m_pg[] = {
2814 		0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2815 		0, 0, 0x0, 0x0
2816 	};
2817 
2818 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2819 	if (1 == pcontrol)
2820 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2821 	else if (2 == pcontrol)
2822 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2823 	return sizeof(iec_m_pg);
2824 }
2825 
2826 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2827 {	/* SAS SSP mode page - short format for mode_sense */
2828 	static const unsigned char sas_sf_m_pg[] = {
2829 		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
2830 	};
2831 
2832 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2833 	if (1 == pcontrol)
2834 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2835 	return sizeof(sas_sf_m_pg);
2836 }
2837 
2838 
2839 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2840 			      int target_dev_id)
2841 {	/* SAS phy control and discover mode page for mode_sense */
2842 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2843 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2844 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2845 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2846 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2847 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2848 		    0, 0, 0, 0, 0, 0, 0, 0,
2849 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2850 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2851 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2852 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2853 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2854 		    0, 0, 0, 0, 0, 0, 0, 0,
2855 		};
2856 	int port_a, port_b;
2857 
2858 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2859 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2860 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2861 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2862 	port_a = target_dev_id + 1;
2863 	port_b = port_a + 1;
2864 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2865 	put_unaligned_be32(port_a, p + 20);
2866 	put_unaligned_be32(port_b, p + 48 + 20);
2867 	if (1 == pcontrol)
2868 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2869 	return sizeof(sas_pcd_m_pg);
2870 }
2871 
2872 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2873 {	/* SAS SSP shared protocol specific port mode subpage */
2874 	static const unsigned char sas_sha_m_pg[] = {
2875 		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2876 		0, 0, 0, 0, 0, 0, 0, 0,
2877 	};
2878 
2879 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2880 	if (1 == pcontrol)
2881 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2882 	return sizeof(sas_sha_m_pg);
2883 }
2884 
2885 static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
2886 	0xff, 0xff, 0x00, 0x00};
2887 
2888 static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2889 {	/* Partition page for mode_sense (tape) */
2890 	memcpy(p, partition_pg, sizeof(partition_pg));
2891 	if (pcontrol == 1)
2892 		memset(p + 2, 0, sizeof(partition_pg) - 2);
2893 	return sizeof(partition_pg);
2894 }
2895 
2896 static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2897 				unsigned char *new, int pg_len)
2898 {
2899 	int new_nbr, p0_size, p1_size;
2900 
2901 	if ((new[4] & 0x80) != 0) { /* FDP */
2902 		partition_pg[4] |= 0x80;
2903 		devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2904 		devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2905 		devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2906 	} else {
2907 		new_nbr = new[3] + 1;
2908 		if (new_nbr > TAPE_MAX_PARTITIONS)
2909 			return 3;
2910 		if ((new[4] & 0x40) != 0) { /* SDP */
2911 			p1_size = TAPE_PARTITION_1_UNITS;
2912 			p0_size = TAPE_UNITS - p1_size;
2913 			if (p0_size < 100)
2914 				return 4;
2915 		} else if ((new[4] & 0x20) != 0) { /* IDP */
2916 			if (new_nbr > 1) {
2917 				p0_size = get_unaligned_be16(new + 8);
2918 				p1_size = get_unaligned_be16(new + 10);
2919 				if (p1_size == 0xFFFF)
2920 					p1_size = TAPE_UNITS - p0_size;
2921 				else if (p0_size == 0xFFFF)
2922 					p0_size = TAPE_UNITS - p1_size;
2923 				if (p0_size < 100 || p1_size < 100)
2924 					return 8;
2925 			} else {
2926 				p0_size = TAPE_UNITS;
2927 				p1_size = 0;
2928 			}
2929 		} else
2930 			return 6;
2931 		devip->tape_pending_nbr_partitions = new_nbr;
2932 		devip->tape_pending_part_0_size = p0_size;
2933 		devip->tape_pending_part_1_size = p1_size;
2934 		partition_pg[3] = new_nbr;
2936 	}
2937 
2938 	return 0;
2939 }
2940 
2941 static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
2942 	unsigned char dce)
2943 {	/* Compression page for mode_sense (tape) */
2944 	static const unsigned char compression_pg[] = {
2945 		0x0f, 14, 0x40, 0, 0, 0, 0, 0,
2946 		0, 0, 0, 0, 0, 0
2947 	};
2948 
2949 	memcpy(p, compression_pg, sizeof(compression_pg));
2950 	if (dce)
2951 		p[2] |= 0x80;
2952 	if (pcontrol == 1)
2953 		memset(p + 2, 0, sizeof(compression_pg) - 2);
2954 	return sizeof(compression_pg);
2955 }
2956 
2957 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2958 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2959 
2960 static int resp_mode_sense(struct scsi_cmnd *scp,
2961 			   struct sdebug_dev_info *devip)
2962 {
2963 	int pcontrol, pcode, subpcode, bd_len;
2964 	unsigned char dev_spec;
2965 	u32 alloc_len, offset, len;
2966 	int target_dev_id;
2967 	int target = scp->device->id;
2968 	unsigned char *ap;
2969 	unsigned char *arr __free(kfree);
2970 	unsigned char *cmd = scp->cmnd;
2971 	bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
2972 
2973 	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2974 	if (!arr)
2975 		return -ENOMEM;
2976 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2977 	pcontrol = (cmd[2] & 0xc0) >> 6;
2978 	pcode = cmd[2] & 0x3f;
2979 	subpcode = cmd[3];
2980 	msense_6 = (MODE_SENSE == cmd[0]);
2981 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2982 	is_disk = (scp->device->type == TYPE_DISK);
2983 	is_zbc = devip->zoned;
2984 	is_tape = (scp->device->type == TYPE_TAPE);
2985 	if ((is_disk || is_zbc || is_tape) && !dbd)
2986 		bd_len = llbaa ? 16 : 8;
2987 	else
2988 		bd_len = 0;
2989 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2990 	if (0x3 == pcontrol) {  /* Saving values not supported */
2991 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2992 		return check_condition_result;
2993 	}
2994 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2995 			(devip->target * 1000) - 3;
2996 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2997 	if (is_disk || is_zbc) {
2998 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2999 		if (sdebug_wp)
3000 			dev_spec |= 0x80;
3001 	} else
3002 		dev_spec = 0x0;
3003 	if (msense_6) {
3004 		arr[2] = dev_spec;
3005 		arr[3] = bd_len;
3006 		offset = 4;
3007 	} else {
3008 		arr[3] = dev_spec;
3009 		if (16 == bd_len)
3010 			arr[4] = 0x1;	/* set LONGLBA bit */
3011 		arr[7] = bd_len;	/* assume 255 or less */
3012 		offset = 8;
3013 	}
3014 	ap = arr + offset;
3015 	if ((bd_len > 0) && (!sdebug_capacity))
3016 		sdebug_capacity = get_sdebug_capacity();
3017 
3018 	if (8 == bd_len) {
3019 		if (sdebug_capacity > 0xfffffffe)
3020 			put_unaligned_be32(0xffffffff, ap + 0);
3021 		else
3022 			put_unaligned_be32(sdebug_capacity, ap + 0);
3023 		if (is_tape) {
3024 			ap[0] = devip->tape_density;
3025 			put_unaligned_be16(devip->tape_blksize, ap + 6);
3026 		} else
3027 			put_unaligned_be16(sdebug_sector_size, ap + 6);
3028 		offset += bd_len;
3029 		ap = arr + offset;
3030 	} else if (16 == bd_len) {
3031 		if (is_tape) {
3032 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
3033 			return check_condition_result;
3034 		}
3035 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
3036 		put_unaligned_be32(sdebug_sector_size, ap + 12);
3037 		offset += bd_len;
3038 		ap = arr + offset;
3039 	}
3040 	if (cmd[2] == 0)
3041 		goto only_bd; /* Only block descriptor requested */
3042 
3043 	/*
3044 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
3045 	 *        len += resp_*_pg(ap + len, pcontrol, target);
3046 	 */
3047 	switch (pcode) {
3048 	case 0x1:	/* Read-Write error recovery page, direct access */
3049 		if (subpcode > 0x0 && subpcode < 0xff)
3050 			goto bad_subpcode;
3051 		len = resp_err_recov_pg(ap, pcontrol, target);
3052 		offset += len;
3053 		break;
3054 	case 0x2:	/* Disconnect-Reconnect page, all devices */
3055 		if (subpcode > 0x0 && subpcode < 0xff)
3056 			goto bad_subpcode;
3057 		len = resp_disconnect_pg(ap, pcontrol, target);
3058 		offset += len;
3059 		break;
3060 	case 0x3:       /* Format device page, direct access */
3061 		if (subpcode > 0x0 && subpcode < 0xff)
3062 			goto bad_subpcode;
3063 		if (is_disk) {
3064 			len = resp_format_pg(ap, pcontrol, target);
3065 			offset += len;
3066 		} else {
3067 			goto bad_pcode;
3068 		}
3069 		break;
3070 	case 0x8:	/* Caching page, direct access */
3071 		if (subpcode > 0x0 && subpcode < 0xff)
3072 			goto bad_subpcode;
3073 		if (is_disk || is_zbc) {
3074 			len = resp_caching_pg(ap, pcontrol, target);
3075 			offset += len;
3076 		} else {
3077 			goto bad_pcode;
3078 		}
3079 		break;
3080 	case 0xa:	/* Control Mode page, all devices */
3081 		switch (subpcode) {
3082 		case 0:
3083 			len = resp_ctrl_m_pg(ap, pcontrol, target);
3084 			break;
3085 		case 0x05:
3086 			len = resp_grouping_m_pg(ap, pcontrol, target);
3087 			break;
3088 		case 0xff:
3089 			len = resp_ctrl_m_pg(ap, pcontrol, target);
3090 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
3091 			break;
3092 		default:
3093 			goto bad_subpcode;
3094 		}
3095 		offset += len;
3096 		break;
3097 	case 0xf:	/* Compression Mode Page (tape) */
3098 		if (!is_tape)
3099 			goto bad_pcode;
3100 		len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
3101 		offset += len;
3102 		break;
3103 	case 0x11:	/* Partition Mode Page (tape) */
3104 		if (!is_tape)
3105 			goto bad_pcode;
3106 		len = resp_partition_m_pg(ap, pcontrol, target);
3107 		offset += len;
3108 		break;
3109 	case 0x19:	/* if spc==1 then sas phy, control+discover */
3110 		if (subpcode > 0x2 && subpcode < 0xff)
3111 			goto bad_subpcode;
3112 		len = 0;
3113 		if ((0x0 == subpcode) || (0xff == subpcode))
3114 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3115 		if ((0x1 == subpcode) || (0xff == subpcode))
3116 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3117 						  target_dev_id);
3118 		if ((0x2 == subpcode) || (0xff == subpcode))
3119 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
3120 		offset += len;
3121 		break;
3122 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
3123 		if (subpcode > 0x0 && subpcode < 0xff)
3124 			goto bad_subpcode;
3125 		len = resp_iec_m_pg(ap, pcontrol, target);
3126 		offset += len;
3127 		break;
3128 	case 0x3f:	/* Read all Mode pages */
3129 		if (subpcode > 0x0 && subpcode < 0xff)
3130 			goto bad_subpcode;
3131 		len = resp_err_recov_pg(ap, pcontrol, target);
3132 		len += resp_disconnect_pg(ap + len, pcontrol, target);
3133 		if (is_disk) {
3134 			len += resp_format_pg(ap + len, pcontrol, target);
3135 			len += resp_caching_pg(ap + len, pcontrol, target);
3136 		} else if (is_zbc) {
3137 			len += resp_caching_pg(ap + len, pcontrol, target);
3138 		}
3139 		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
3140 		if (0xff == subpcode)
3141 			len += resp_grouping_m_pg(ap + len, pcontrol, target);
3142 		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3143 		if (0xff == subpcode) {
3144 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3145 						  target_dev_id);
3146 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
3147 		}
3148 		len += resp_iec_m_pg(ap + len, pcontrol, target);
3149 		offset += len;
3150 		break;
3151 	default:
3152 		goto bad_pcode;
3153 	}
3154 only_bd:
3155 	if (msense_6)
3156 		arr[0] = offset - 1;
3157 	else
3158 		put_unaligned_be16((offset - 2), arr + 0);
3159 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
3160 
3161 bad_pcode:
3162 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3163 	return check_condition_result;
3164 
3165 bad_subpcode:
3166 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3167 	return check_condition_result;
3168 }
3169 
3170 #define SDEBUG_MAX_MSELECT_SZ 512
3171 
3172 static int resp_mode_select(struct scsi_cmnd *scp,
3173 			    struct sdebug_dev_info *devip)
3174 {
3175 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
3176 	int param_len, res, mpage;
3177 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
3178 	unsigned char *cmd = scp->cmnd;
3179 	int mselect6 = (MODE_SELECT == cmd[0]);
3180 
3181 	memset(arr, 0, sizeof(arr));
3182 	pf = cmd[1] & 0x10;
3183 	sp = cmd[1] & 0x1;
3184 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
3185 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
3186 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
3187 		return check_condition_result;
3188 	}
3189 	res = fetch_to_dev_buffer(scp, arr, param_len);
3190 	if (-1 == res)
3191 		return DID_ERROR << 16;
3192 	else if (sdebug_verbose && (res < param_len))
3193 		sdev_printk(KERN_INFO, scp->device,
3194 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
3195 			    __func__, param_len, res);
3196 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
3197 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
3198 	off = (mselect6 ? 4 : 8);
3199 	if (scp->device->type == TYPE_TAPE) {
3200 		int blksize;
3201 
3202 		if (bd_len != 8) {
3203 			mk_sense_invalid_fld(scp, SDEB_IN_DATA,
3204 					mselect6 ? 3 : 6, -1);
3205 			return check_condition_result;
3206 		}
3207 		if (arr[off] == TAPE_BAD_DENSITY) {
3208 			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3209 			return check_condition_result;
3210 		}
3211 		blksize = get_unaligned_be16(arr + off + 6);
3212 		if (blksize != 0 &&
3213 			(blksize < TAPE_MIN_BLKSIZE ||
3214 				blksize > TAPE_MAX_BLKSIZE ||
3215 				(blksize % 4) != 0)) {
3216 			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
3217 			return check_condition_result;
3218 		}
3219 		devip->tape_density = arr[off];
3220 		devip->tape_blksize = blksize;
3221 	}
3222 	off += bd_len;
3223 	if (off >= res)
3224 		return 0; /* No page written, just descriptors */
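	/*
	 * Per SPC the MODE DATA LENGTH field is reserved (zero) in MODE
	 * SELECT parameter data, so md_len computed above is at most 2 when
	 * the field is correctly zero; larger values are rejected below.
	 */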
3225 	if (md_len > 2) {
3226 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3227 		return check_condition_result;
3228 	}
3229 	mpage = arr[off] & 0x3f;
3230 	ps = !!(arr[off] & 0x80);
3231 	if (ps) {
3232 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
3233 		return check_condition_result;
3234 	}
3235 	spf = !!(arr[off] & 0x40);
3236 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
3237 		       (arr[off + 1] + 2);
3238 	if ((pg_len + off) > param_len) {
3239 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
3240 				PARAMETER_LIST_LENGTH_ERR, 0);
3241 		return check_condition_result;
3242 	}
3243 	switch (mpage) {
3244 	case 0x8:      /* Caching Mode page */
3245 		if (caching_pg[1] == arr[off + 1]) {
3246 			memcpy(caching_pg + 2, arr + off + 2,
3247 			       sizeof(caching_pg) - 2);
3248 			goto set_mode_changed_ua;
3249 		}
3250 		break;
3251 	case 0xa:      /* Control Mode page */
3252 		if (ctrl_m_pg[1] == arr[off + 1]) {
3253 			memcpy(ctrl_m_pg + 2, arr + off + 2,
3254 			       sizeof(ctrl_m_pg) - 2);
3255 			if (ctrl_m_pg[4] & 0x8)
3256 				sdebug_wp = true;
3257 			else
3258 				sdebug_wp = false;
3259 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
3260 			goto set_mode_changed_ua;
3261 		}
3262 		break;
3263 	case 0xf:       /* Compression mode page */
3264 		if (scp->device->type != TYPE_TAPE)
3265 			goto bad_pcode;
3266 		if ((arr[off + 2] & 0x40) != 0) {
3267 			devip->tape_dce = (arr[off + 2] & 0x80) != 0;
3268 			return 0;
3269 		}
3270 		break;
3271 	case 0x11:	/* Medium Partition Mode Page (tape) */
3272 		if (scp->device->type == TYPE_TAPE) {
3273 			int fld;
3274 
3275 			fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
3276 			if (fld == 0)
3277 				return 0;
3278 			mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
3279 			return check_condition_result;
3280 		}
3281 		break;
3282 	case 0x1c:      /* Informational Exceptions Mode page */
3283 		if (iec_m_pg[1] == arr[off + 1]) {
3284 			memcpy(iec_m_pg + 2, arr + off + 2,
3285 			       sizeof(iec_m_pg) - 2);
3286 			goto set_mode_changed_ua;
3287 		}
3288 		break;
3289 	default:
3290 		break;
3291 	}
3292 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
3293 	return check_condition_result;
3294 set_mode_changed_ua:
3295 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3296 	return 0;
3297 
3298 bad_pcode:
3299 	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3300 	return check_condition_result;
3301 }
3302 
3303 static int resp_temp_l_pg(unsigned char *arr)
3304 {
3305 	static const unsigned char temp_l_pg[] = {
3306 		0x0, 0x0, 0x3, 0x2, 0x0, 38,
3307 		0x0, 0x1, 0x3, 0x2, 0x0, 65,
3308 	};
3309 
3310 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3311 	return sizeof(temp_l_pg);
3312 }
3313 
3314 static int resp_ie_l_pg(unsigned char *arr)
3315 {
3316 	static const unsigned char ie_l_pg[] = {
3317 		0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3318 	};
3319 
3320 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3321 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
3322 		arr[4] = THRESHOLD_EXCEEDED;
3323 		arr[5] = 0xff;
3324 	}
3325 	return sizeof(ie_l_pg);
3326 }
3327 
3328 static int resp_env_rep_l_spg(unsigned char *arr)
3329 {
3330 	static const unsigned char env_rep_l_spg[] = {
3331 		0x0, 0x0, 0x23, 0x8,
3332 		0x0, 40, 72, 0xff, 45, 18, 0, 0,
3333 		0x1, 0x0, 0x23, 0x8,
3334 		0x0, 55, 72, 35, 55, 45, 0, 0,
3335 	};
3336 
3337 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3338 	return sizeof(env_rep_l_spg);
3339 }
3340 
3341 #define SDEBUG_MAX_LSENSE_SZ 512
3342 
3343 static int resp_log_sense(struct scsi_cmnd *scp,
3344 			  struct sdebug_dev_info *devip)
3345 {
3346 	int ppc, sp, pcode, subpcode;
3347 	u32 alloc_len, len, n;
3348 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3349 	unsigned char *cmd = scp->cmnd;
3350 
3351 	memset(arr, 0, sizeof(arr));
3352 	ppc = cmd[1] & 0x2;
3353 	sp = cmd[1] & 0x1;
3354 	if (ppc || sp) {
3355 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3356 		return check_condition_result;
3357 	}
3358 	pcode = cmd[2] & 0x3f;
3359 	subpcode = cmd[3] & 0xff;
3360 	alloc_len = get_unaligned_be16(cmd + 7);
3361 	arr[0] = pcode;
3362 	if (0 == subpcode) {
3363 		switch (pcode) {
3364 		case 0x0:	/* Supported log pages log page */
3365 			n = 4;
3366 			arr[n++] = 0x0;		/* this page */
3367 			arr[n++] = 0xd;		/* Temperature */
3368 			arr[n++] = 0x2f;	/* Informational exceptions */
3369 			arr[3] = n - 4;
3370 			break;
3371 		case 0xd:	/* Temperature log page */
3372 			arr[3] = resp_temp_l_pg(arr + 4);
3373 			break;
3374 		case 0x2f:	/* Informational exceptions log page */
3375 			arr[3] = resp_ie_l_pg(arr + 4);
3376 			break;
3377 		default:
3378 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3379 			return check_condition_result;
3380 		}
3381 	} else if (0xff == subpcode) {
3382 		arr[0] |= 0x40;
3383 		arr[1] = subpcode;
3384 		switch (pcode) {
3385 		case 0x0:	/* Supported log pages and subpages log page */
3386 			n = 4;
3387 			arr[n++] = 0x0;
3388 			arr[n++] = 0x0;		/* 0,0 page */
3389 			arr[n++] = 0x0;
3390 			arr[n++] = 0xff;	/* this page */
3391 			arr[n++] = 0xd;
3392 			arr[n++] = 0x0;		/* Temperature */
3393 			arr[n++] = 0xd;
3394 			arr[n++] = 0x1;		/* Environment reporting */
3395 			arr[n++] = 0xd;
3396 			arr[n++] = 0xff;	/* all 0xd subpages */
3397 			arr[n++] = 0x2f;
3398 			arr[n++] = 0x0;	/* Informational exceptions */
3399 			arr[n++] = 0x2f;
3400 			arr[n++] = 0xff;	/* all 0x2f subpages */
3401 			arr[3] = n - 4;
3402 			break;
3403 		case 0xd:	/* Temperature subpages */
3404 			n = 4;
3405 			arr[n++] = 0xd;
3406 			arr[n++] = 0x0;		/* Temperature */
3407 			arr[n++] = 0xd;
3408 			arr[n++] = 0x1;		/* Environment reporting */
3409 			arr[n++] = 0xd;
3410 			arr[n++] = 0xff;	/* these subpages */
3411 			arr[3] = n - 4;
3412 			break;
3413 		case 0x2f:	/* Informational exceptions subpages */
3414 			n = 4;
3415 			arr[n++] = 0x2f;
3416 			arr[n++] = 0x0;		/* Informational exceptions */
3417 			arr[n++] = 0x2f;
3418 			arr[n++] = 0xff;	/* these subpages */
3419 			arr[3] = n - 4;
3420 			break;
3421 		default:
3422 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3423 			return check_condition_result;
3424 		}
3425 	} else if (subpcode > 0) {
3426 		arr[0] |= 0x40;
3427 		arr[1] = subpcode;
3428 		if (pcode == 0xd && subpcode == 1)
3429 			arr[3] = resp_env_rep_l_spg(arr + 4);
3430 		else {
3431 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3432 			return check_condition_result;
3433 		}
3434 	} else {
3435 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3436 		return check_condition_result;
3437 	}
3438 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3439 	return fill_from_dev_buffer(scp, arr,
3440 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3441 }
3442 
3443 enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
3444 static int resp_read_blklimits(struct scsi_cmnd *scp,
3445 			struct sdebug_dev_info *devip)
3446 {
3447 	unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
3448 
3449 	arr[0] = 4;
3450 	put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
3451 	put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
3452 	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
3453 }
3454 
3455 static int resp_locate(struct scsi_cmnd *scp,
3456 		struct sdebug_dev_info *devip)
3457 {
3458 	unsigned char *cmd = scp->cmnd;
3459 	unsigned int i, pos;
3460 	struct tape_block *blp;
3461 	int partition;
3462 
3463 	if ((cmd[1] & 0x02) != 0) {
3464 		if (cmd[8] >= devip->tape_nbr_partitions) {
3465 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3466 			return check_condition_result;
3467 		}
3468 		devip->tape_partition = cmd[8];
3469 	}
3470 	pos = get_unaligned_be32(cmd + 3);
3471 	partition = devip->tape_partition;
3472 
3473 	for (i = 0, blp = devip->tape_blocks[partition];
3474 	     i < pos && i < devip->tape_eop[partition]; i++, blp++)
3475 		if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3476 			break;
3477 	if (i < pos) {
3478 		devip->tape_location[partition] = i;
3479 		mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
3480 		return check_condition_result;
3481 	}
3482 	devip->tape_location[partition] = pos;
3483 
3484 	return 0;
3485 }
3486 
3487 static int resp_write_filemarks(struct scsi_cmnd *scp,
3488 		struct sdebug_dev_info *devip)
3489 {
3490 	unsigned char *cmd = scp->cmnd;
3491 	unsigned int i, count, pos;
3492 	u32 data;
3493 	int partition = devip->tape_partition;
3494 
3495 	if ((cmd[1] & 0xfe) != 0) { /* WSmk or reserved bits set; setmarks are not defined in SCSI-3 and later */
3496 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
3497 		return check_condition_result;
3498 	}
3499 	count = get_unaligned_be24(cmd + 2);
3500 	data = TAPE_BLOCK_FM_FLAG;
3501 	for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
3502 		if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
3503 			devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3504 			mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
3505 					EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
3506 			return check_condition_result;
3507 		}
3508 		(devip->tape_blocks[partition] + pos)->fl_size = data;
3509 	}
3510 	(devip->tape_blocks[partition] + pos)->fl_size =
3511 		TAPE_BLOCK_EOD_FLAG;
3512 	devip->tape_location[partition] = pos;
3513 
3514 	return 0;
3515 }
3516 
3517 static int resp_space(struct scsi_cmnd *scp,
3518 		struct sdebug_dev_info *devip)
3519 {
3520 	unsigned char *cmd = scp->cmnd, code;
3521 	int i = 0, pos, count;
3522 	struct tape_block *blp;
3523 	int partition = devip->tape_partition;
3524 
3525 	count = get_unaligned_be24(cmd + 2);
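	/*
	 * COUNT is a 24-bit two's complement value, e.g. 0xfffffe means
	 * "space backwards two"; the next step widens the sign to 32 bits.
	 */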
3526 	if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
3527 		count |= 0xff000000;
3528 	code = cmd[1] & 0x0f;
3529 
3530 	pos = devip->tape_location[partition];
3531 	if (code == 0) { /* blocks */
3532 		if (count < 0) {
3533 			count = (-count);
3534 			pos -= 1;
3535 			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3536 			     i++) {
3537 				if (pos < 0)
3538 					goto is_bop;
3539 				else if (IS_TAPE_BLOCK_FM(blp->fl_size))
3540 					goto is_fm;
3541 				if (i > 0) {
3542 					pos--;
3543 					blp--;
3544 				}
3545 			}
3546 		} else if (count > 0) {
3547 			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3548 			     i++, pos++, blp++) {
3549 				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3550 					goto is_eod;
3551 				if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
3552 					pos += 1;
3553 					goto is_fm;
3554 				}
3555 				if (pos >= devip->tape_eop[partition])
3556 					goto is_eop;
3557 			}
3558 		}
3559 	} else if (code == 1) { /* filemarks */
3560 		if (count < 0) {
3561 			count = (-count);
3562 			if (pos == 0)
3563 				goto is_bop;
3564 			else {
3565 				for (i = 0, blp = devip->tape_blocks[partition] + pos;
3566 				     i < count && pos >= 0; i++, pos--, blp--) {
3567 					for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3568 						     pos >= 0; pos--, blp--)
3569 						; /* empty */
3570 					if (pos < 0)
3571 						goto is_bop;
3572 				}
3573 			}
3574 			pos += 1;
3575 		} else if (count > 0) {
3576 			for (i = 0, blp = devip->tape_blocks[partition] + pos;
3577 			     i < count; i++, pos++, blp++) {
3578 				for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3579 					      !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
3580 					      pos < devip->tape_eop[partition];
3581 				      pos++, blp++)
3582 					; /* empty */
3583 				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3584 					goto is_eod;
3585 				if (pos >= devip->tape_eop[partition])
3586 					goto is_eop;
3587 			}
3588 		}
3589 	} else if (code == 3) { /* EOD */
3590 		for (blp = devip->tape_blocks[partition] + pos;
3591 		     !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
3592 		     pos++, blp++)
3593 			; /* empty */
3594 		if (pos >= devip->tape_eop[partition])
3595 			goto is_eop;
3596 	} else {
3597 		/* sequential filemarks not supported */
3598 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3599 		return check_condition_result;
3600 	}
3601 	devip->tape_location[partition] = pos;
3602 	return 0;
3603 
3604 is_fm:
3605 	devip->tape_location[partition] = pos;
3606 	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3607 			FILEMARK_DETECTED_ASCQ, count - i,
3608 			SENSE_FLAG_FILEMARK);
3609 	return check_condition_result;
3610 
3611 is_eod:
3612 	devip->tape_location[partition] = pos;
3613 	mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
3614 			EOD_DETECTED_ASCQ, count - i,
3615 			0);
3616 	return check_condition_result;
3617 
3618 is_bop:
3619 	devip->tape_location[partition] = 0;
3620 	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3621 			BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
3622 			SENSE_FLAG_EOM);
3624 	return check_condition_result;
3625 
3626 is_eop:
3627 	devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3628 	mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
3629 			EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
3630 			SENSE_FLAG_EOM);
3631 	return check_condition_result;
3632 }
3633 
3634 enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
3635 static int resp_read_position(struct scsi_cmnd *scp,
3636 			struct sdebug_dev_info *devip)
3637 {
3638 	u8 *cmd = scp->cmnd;
3639 	int all_length;
3640 	unsigned char arr[SDEBUG_READ_POSITION_ARR_SZ];
3641 	unsigned int pos;
3642 
3643 	all_length = get_unaligned_be16(cmd + 7);
3644 	if ((cmd[1] & 0xfe) != 0 ||
3645 		all_length != 0) { /* only short form */
3646 		mk_sense_invalid_fld(scp, SDEB_IN_CDB,
3647 				all_length ? 7 : 1, 0);
3648 		return check_condition_result;
3649 	}
3650 	memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
3651 	arr[1] = devip->tape_partition;
3652 	pos = devip->tape_location[devip->tape_partition];
3653 	put_unaligned_be32(pos, arr + 4);
3654 	put_unaligned_be32(pos, arr + 8);
3655 	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
3656 }
3657 
3658 static int resp_rewind(struct scsi_cmnd *scp,
3659 		struct sdebug_dev_info *devip)
3660 {
3661 	devip->tape_location[devip->tape_partition] = 0;
3662 
3663 	return 0;
3664 }
3665 
3666 static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
3667 			int part_0_size, int part_1_size)
3668 {
3669 	int i;
3670 
3671 	if (part_0_size + part_1_size > TAPE_UNITS)
3672 		return -1;
3673 	devip->tape_eop[0] = part_0_size;
3674 	devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
3675 	devip->tape_eop[1] = part_1_size;
3676 	devip->tape_blocks[1] = devip->tape_blocks[0] +
3677 			devip->tape_eop[0];
3678 	devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
3679 
3680 	for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
3681 		devip->tape_location[i] = 0;
3682 
3683 	devip->tape_nbr_partitions = nbr_partitions;
3684 	devip->tape_partition = 0;
3685 
3686 	partition_pg[3] = nbr_partitions - 1;
3687 	put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
3688 	put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
3689 
3690 	return nbr_partitions;
3691 }
3692 
3693 static int resp_format_medium(struct scsi_cmnd *scp,
3694 			struct sdebug_dev_info *devip)
3695 {
3696 	int res = 0;
3697 	unsigned char *cmd = scp->cmnd;
3698 
3699 	if (cmd[2] > 2) {
3700 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3701 		return check_condition_result;
3702 	}
3703 	if (cmd[2] != 0) {
3704 		if (devip->tape_pending_nbr_partitions > 0) {
3705 			res = partition_tape(devip,
3706 					devip->tape_pending_nbr_partitions,
3707 					devip->tape_pending_part_0_size,
3708 					devip->tape_pending_part_1_size);
3709 		} else
3710 			res = partition_tape(devip, devip->tape_nbr_partitions,
3711 					devip->tape_eop[0], devip->tape_eop[1]);
3712 	} else
3713 		res = partition_tape(devip, 1, TAPE_UNITS, 0);
3714 	if (res < 0)
3715 		return -EINVAL;
3716 
3717 	devip->tape_pending_nbr_partitions = -1;
3718 
3719 	return 0;
3720 }
3721 
3722 static int resp_erase(struct scsi_cmnd *scp,
3723 		struct sdebug_dev_info *devip)
3724 {
3725 	int partition = devip->tape_partition;
3726 	int pos = devip->tape_location[partition];
3727 	struct tape_block *blp;
3728 
3729 	blp = devip->tape_blocks[partition] + pos;
3730 	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
3731 
3732 	return 0;
3733 }
3734 
3735 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3736 {
3737 	return devip->nr_zones != 0;
3738 }
3739 
3740 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3741 					unsigned long long lba)
3742 {
3743 	u32 zno = lba >> devip->zsize_shift;
3744 	struct sdeb_zone_state *zsp;
3745 
3746 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3747 		return &devip->zstate[zno];
3748 
3749 	/*
3750 	 * If the zone capacity is less than the zone size, adjust for gap
3751 	 * zones.
3752 	 */
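	/*
	 * Zones are stored as nr_conv_zones conventional zones followed by
	 * (SWR, gap) pairs, while zsize_shift counts each SWR+gap pair as
	 * one logical zone. E.g. with nr_conv_zones == 1, logical zone 3
	 * maps to array index 2 * 3 - 1 == 5, the third SWR zone in
	 * [conv][swr][gap][swr][gap][swr][gap].
	 */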
3753 	zno = 2 * zno - devip->nr_conv_zones;
3754 	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
3755 	zsp = &devip->zstate[zno];
3756 	if (lba >= zsp->z_start + zsp->z_size)
3757 		zsp++;
3758 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3759 	return zsp;
3760 }
3761 
3762 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3763 {
3764 	return zsp->z_type == ZBC_ZTYPE_CNV;
3765 }
3766 
3767 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3768 {
3769 	return zsp->z_type == ZBC_ZTYPE_GAP;
3770 }
3771 
3772 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3773 {
3774 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3775 }
3776 
3777 static void zbc_close_zone(struct sdebug_dev_info *devip,
3778 			   struct sdeb_zone_state *zsp)
3779 {
3780 	enum sdebug_z_cond zc;
3781 
3782 	if (!zbc_zone_is_seq(zsp))
3783 		return;
3784 
3785 	zc = zsp->z_cond;
3786 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3787 		return;
3788 
3789 	if (zc == ZC2_IMPLICIT_OPEN)
3790 		devip->nr_imp_open--;
3791 	else
3792 		devip->nr_exp_open--;
3793 
3794 	if (zsp->z_wp == zsp->z_start) {
3795 		zsp->z_cond = ZC1_EMPTY;
3796 	} else {
3797 		zsp->z_cond = ZC4_CLOSED;
3798 		devip->nr_closed++;
3799 	}
3800 }
3801 
3802 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3803 {
3804 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3805 	unsigned int i;
3806 
3807 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3808 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3809 			zbc_close_zone(devip, zsp);
3810 			return;
3811 		}
3812 	}
3813 }
3814 
3815 static void zbc_open_zone(struct sdebug_dev_info *devip,
3816 			  struct sdeb_zone_state *zsp, bool explicit)
3817 {
3818 	enum sdebug_z_cond zc;
3819 
3820 	if (!zbc_zone_is_seq(zsp))
3821 		return;
3822 
3823 	zc = zsp->z_cond;
3824 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3825 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3826 		return;
3827 
3828 	/* Close an implicit open zone if necessary */
3829 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3830 		zbc_close_zone(devip, zsp);
3831 	else if (devip->max_open &&
3832 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3833 		zbc_close_imp_open_zone(devip);
3834 
3835 	if (zsp->z_cond == ZC4_CLOSED)
3836 		devip->nr_closed--;
3837 	if (explicit) {
3838 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3839 		devip->nr_exp_open++;
3840 	} else {
3841 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3842 		devip->nr_imp_open++;
3843 	}
3844 }
3845 
3846 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3847 				     struct sdeb_zone_state *zsp)
3848 {
3849 	switch (zsp->z_cond) {
3850 	case ZC2_IMPLICIT_OPEN:
3851 		devip->nr_imp_open--;
3852 		break;
3853 	case ZC3_EXPLICIT_OPEN:
3854 		devip->nr_exp_open--;
3855 		break;
3856 	default:
3857 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3858 			  zsp->z_start, zsp->z_cond);
3859 		break;
3860 	}
3861 	zsp->z_cond = ZC5_FULL;
3862 }
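/*
 * Summary of the zone condition transitions implemented by the three
 * helpers above (an editorial sketch of the ZBC model, not driver code):
 *
 *	EMPTY --write--> IMPLICIT OPEN --close--> CLOSED (or back to EMPTY
 *	                                          if the WP is at z_start)
 *	EMPTY/CLOSED --explicit open--> EXPLICIT OPEN
 *	IMPLICIT/EXPLICIT OPEN --WP reaches zone end--> FULL
 */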
3863 
3864 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3865 		       unsigned long long lba, unsigned int num)
3866 {
3867 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3868 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3869 
3870 	if (!zbc_zone_is_seq(zsp))
3871 		return;
3872 
3873 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3874 		zsp->z_wp += num;
3875 		if (zsp->z_wp >= zend)
3876 			zbc_set_zone_full(devip, zsp);
3877 		return;
3878 	}
3879 
3880 	while (num) {
3881 		if (lba != zsp->z_wp)
3882 			zsp->z_non_seq_resource = true;
3883 
3884 		end = lba + num;
3885 		if (end >= zend) {
3886 			n = zend - lba;
3887 			zsp->z_wp = zend;
3888 		} else if (end > zsp->z_wp) {
3889 			n = num;
3890 			zsp->z_wp = end;
3891 		} else {
3892 			n = num;
3893 		}
3894 		if (zsp->z_wp >= zend)
3895 			zbc_set_zone_full(devip, zsp);
3896 
3897 		num -= n;
3898 		lba += n;
3899 		if (num) {
3900 			zsp++;
3901 			zend = zsp->z_start + zsp->z_size;
3902 		}
3903 	}
3904 }
3905 
3906 static int check_zbc_access_params(struct scsi_cmnd *scp,
3907 			unsigned long long lba, unsigned int num, bool write)
3908 {
3909 	struct scsi_device *sdp = scp->device;
3910 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3911 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3912 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3913 
3914 	if (!write) {
3915 		/* For host-managed, reads cannot cross zone type boundaries */
3916 		if (zsp->z_type != zsp_end->z_type) {
3917 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3918 					LBA_OUT_OF_RANGE,
3919 					READ_INVDATA_ASCQ);
3920 			return check_condition_result;
3921 		}
3922 		return 0;
3923 	}
3924 
3925 	/* Writing into a gap zone is not allowed */
3926 	if (zbc_zone_is_gap(zsp)) {
3927 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3928 				ATTEMPT_ACCESS_GAP);
3929 		return check_condition_result;
3930 	}
3931 
3932 	/* No restrictions for writes within conventional zones */
3933 	if (zbc_zone_is_conv(zsp)) {
3934 		if (!zbc_zone_is_conv(zsp_end)) {
3935 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3936 					LBA_OUT_OF_RANGE,
3937 					WRITE_BOUNDARY_ASCQ);
3938 			return check_condition_result;
3939 		}
3940 		return 0;
3941 	}
3942 
3943 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3944 		/* Writes cannot cross sequential zone boundaries */
3945 		if (zsp_end != zsp) {
3946 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3947 					LBA_OUT_OF_RANGE,
3948 					WRITE_BOUNDARY_ASCQ);
3949 			return check_condition_result;
3950 		}
3951 		/* Cannot write full zones */
3952 		if (zsp->z_cond == ZC5_FULL) {
3953 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3954 					INVALID_FIELD_IN_CDB, 0);
3955 			return check_condition_result;
3956 		}
3957 		/* Writes must be aligned to the zone WP */
3958 		if (lba != zsp->z_wp) {
3959 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3960 					LBA_OUT_OF_RANGE,
3961 					UNALIGNED_WRITE_ASCQ);
3962 			return check_condition_result;
3963 		}
3964 	}
3965 
3966 	/* Handle implicit open of closed and empty zones */
3967 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3968 		if (devip->max_open &&
3969 		    devip->nr_exp_open >= devip->max_open) {
3970 			mk_sense_buffer(scp, DATA_PROTECT,
3971 					INSUFF_RES_ASC,
3972 					INSUFF_ZONE_ASCQ);
3973 			return check_condition_result;
3974 		}
3975 		zbc_open_zone(devip, zsp, false);
3976 	}
3977 
3978 	return 0;
3979 }
3980 
3981 static inline int check_device_access_params
3982 			(struct scsi_cmnd *scp, unsigned long long lba,
3983 			 unsigned int num, bool write)
3984 {
3985 	struct scsi_device *sdp = scp->device;
3986 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3987 
3988 	if (lba + num > sdebug_capacity) {
3989 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3990 		return check_condition_result;
3991 	}
3992 	/* transfer length excessive (tie in to block limits VPD page) */
3993 	if (num > sdebug_store_sectors) {
3994 		/* needs work to find which cdb byte 'num' comes from */
3995 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3996 		return check_condition_result;
3997 	}
3998 	if (write && unlikely(sdebug_wp)) {
3999 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
4000 		return check_condition_result;
4001 	}
4002 	if (sdebug_dev_is_zoned(devip))
4003 		return check_zbc_access_params(scp, lba, num, write);
4004 
4005 	return 0;
4006 }
4007 
4008 /*
4009  * Note: if BUG_ON() fires it usually indicates a problem with the parser
4010  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
4011  * that access any of the "stores" in struct sdeb_store_info should call this
4012  * function with bug_if_fake_rw set to true.
4013  */
4014 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
4015 						bool bug_if_fake_rw)
4016 {
4017 	if (sdebug_fake_rw) {
4018 		BUG_ON(bug_if_fake_rw);	/* See note above */
4019 		return NULL;
4020 	}
4021 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
4022 }
4023 
4024 static inline void
4025 sdeb_read_lock(rwlock_t *lock)
4026 {
4027 	if (sdebug_no_rwlock)
4028 		__acquire(lock);
4029 	else
4030 		read_lock(lock);
4031 }
4032 
4033 static inline void
4034 sdeb_read_unlock(rwlock_t *lock)
4035 {
4036 	if (sdebug_no_rwlock)
4037 		__release(lock);
4038 	else
4039 		read_unlock(lock);
4040 }
4041 
4042 static inline void
4043 sdeb_write_lock(rwlock_t *lock)
4044 {
4045 	if (sdebug_no_rwlock)
4046 		__acquire(lock);
4047 	else
4048 		write_lock(lock);
4049 }
4050 
4051 static inline void
4052 sdeb_write_unlock(rwlock_t *lock)
4053 {
4054 	if (sdebug_no_rwlock)
4055 		__release(lock);
4056 	else
4057 		write_unlock(lock);
4058 }
4059 
4060 static inline void
4061 sdeb_data_read_lock(struct sdeb_store_info *sip)
4062 {
4063 	BUG_ON(!sip);
4064 
4065 	sdeb_read_lock(&sip->macc_data_lck);
4066 }
4067 
4068 static inline void
4069 sdeb_data_read_unlock(struct sdeb_store_info *sip)
4070 {
4071 	BUG_ON(!sip);
4072 
4073 	sdeb_read_unlock(&sip->macc_data_lck);
4074 }
4075 
4076 static inline void
4077 sdeb_data_write_lock(struct sdeb_store_info *sip)
4078 {
4079 	BUG_ON(!sip);
4080 
4081 	sdeb_write_lock(&sip->macc_data_lck);
4082 }
4083 
4084 static inline void
4085 sdeb_data_write_unlock(struct sdeb_store_info *sip)
4086 {
4087 	BUG_ON(!sip);
4088 
4089 	sdeb_write_unlock(&sip->macc_data_lck);
4090 }
4091 
4092 static inline void
4093 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
4094 {
4095 	BUG_ON(!sip);
4096 
4097 	sdeb_read_lock(&sip->macc_sector_lck);
4098 }
4099 
4100 static inline void
4101 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
4102 {
4103 	BUG_ON(!sip);
4104 
4105 	sdeb_read_unlock(&sip->macc_sector_lck);
4106 }
4107 
4108 static inline void
4109 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
4110 {
4111 	BUG_ON(!sip);
4112 
4113 	sdeb_write_lock(&sip->macc_sector_lck);
4114 }
4115 
4116 static inline void
4117 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
4118 {
4119 	BUG_ON(!sip);
4120 
4121 	sdeb_write_unlock(&sip->macc_sector_lck);
4122 }
4123 
4124 /*
4125  * Atomic locking:
4126  * We simplify the atomic model to allow only 1x atomic write and many non-
4127  * atomic reads or writes for all LBAs.
4128  *
4129  * A RW lock has a similar behaviour:
4130  * Only 1x writer and many readers.
4131  *
4132  * So use a RW lock for per-device read and write locking:
4133  * An atomic access grabs the lock as a writer and non-atomic grabs the lock
4134  * as a reader.
4135  */
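
/*
 * A sketch of the resulting discipline (illustrative only):
 *
 *	sdeb_data_lock(sip, atomic);
 *	... access the LBA range ...
 *	sdeb_data_unlock(sip, atomic);
 *
 * An atomic accessor takes the lock as a writer and excludes everyone;
 * non-atomic accessors take it as readers and may run concurrently.
 */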
4136 
4137 static inline void
4138 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
4139 {
4140 	if (atomic)
4141 		sdeb_data_write_lock(sip);
4142 	else
4143 		sdeb_data_read_lock(sip);
4144 }
4145 
4146 static inline void
4147 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
4148 {
4149 	if (atomic)
4150 		sdeb_data_write_unlock(sip);
4151 	else
4152 		sdeb_data_read_unlock(sip);
4153 }
4154 
4155 /* Allow many reads but only 1x write per sector */
4156 static inline void
4157 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
4158 {
4159 	if (do_write)
4160 		sdeb_data_sector_write_lock(sip);
4161 	else
4162 		sdeb_data_sector_read_lock(sip);
4163 }
4164 
4165 static inline void
4166 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
4167 {
4168 	if (do_write)
4169 		sdeb_data_sector_write_unlock(sip);
4170 	else
4171 		sdeb_data_sector_read_unlock(sip);
4172 }
4173 
4174 static inline void
4175 sdeb_meta_read_lock(struct sdeb_store_info *sip)
4176 {
4177 	if (sdebug_no_rwlock) {
4178 		if (sip)
4179 			__acquire(&sip->macc_meta_lck);
4180 		else
4181 			__acquire(&sdeb_fake_rw_lck);
4182 	} else {
4183 		if (sip)
4184 			read_lock(&sip->macc_meta_lck);
4185 		else
4186 			read_lock(&sdeb_fake_rw_lck);
4187 	}
4188 }
4189 
4190 static inline void
4191 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
4192 {
4193 	if (sdebug_no_rwlock) {
4194 		if (sip)
4195 			__release(&sip->macc_meta_lck);
4196 		else
4197 			__release(&sdeb_fake_rw_lck);
4198 	} else {
4199 		if (sip)
4200 			read_unlock(&sip->macc_meta_lck);
4201 		else
4202 			read_unlock(&sdeb_fake_rw_lck);
4203 	}
4204 }
4205 
4206 static inline void
4207 sdeb_meta_write_lock(struct sdeb_store_info *sip)
4208 {
4209 	if (sdebug_no_rwlock) {
4210 		if (sip)
4211 			__acquire(&sip->macc_meta_lck);
4212 		else
4213 			__acquire(&sdeb_fake_rw_lck);
4214 	} else {
4215 		if (sip)
4216 			write_lock(&sip->macc_meta_lck);
4217 		else
4218 			write_lock(&sdeb_fake_rw_lck);
4219 	}
4220 }
4221 
4222 static inline void
4223 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
4224 {
4225 	if (sdebug_no_rwlock) {
4226 		if (sip)
4227 			__release(&sip->macc_meta_lck);
4228 		else
4229 			__release(&sdeb_fake_rw_lck);
4230 	} else {
4231 		if (sip)
4232 			write_unlock(&sip->macc_meta_lck);
4233 		else
4234 			write_unlock(&sdeb_fake_rw_lck);
4235 	}
4236 }
4237 
4238 /* Returns number of bytes copied or -1 if error. */
4239 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
4240 			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
4241 			    bool do_write, bool atomic)
4242 {
4243 	int ret;
4244 	u64 block;
4245 	enum dma_data_direction dir;
4246 	struct scsi_data_buffer *sdb = &scp->sdb;
4247 	u8 *fsp;
4248 	int i, total = 0;
4249 
4250 	/*
4251 	 * Even though reads are inherently atomic (in this driver), we expect
4252 	 * the atomic flag only for writes.
4253 	 */
4254 	if (!do_write && atomic)
4255 		return -1;
4256 
4257 	if (do_write) {
4258 		dir = DMA_TO_DEVICE;
4259 		write_since_sync = true;
4260 	} else {
4261 		dir = DMA_FROM_DEVICE;
4262 	}
4263 
4264 	if (!sdb->length || !sip)
4265 		return 0;
4266 	if (scp->sc_data_direction != dir)
4267 		return -1;
4268 
4269 	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
4270 		atomic_long_inc(&writes_by_group_number[group_number]);
4271 
4272 	fsp = sip->storep;
4273 
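	/*
	 * do_div() divides in place: lba becomes the quotient and the
	 * remainder is returned, so block is lba % sdebug_store_sectors and
	 * accesses wrap around the (possibly smaller) backing store.
	 */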
4274 	block = do_div(lba, sdebug_store_sectors);
4275 
4276 	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
4277 	sdeb_data_lock(sip, atomic);
4278 	for (i = 0; i < num; i++) {
4279 		/* We shouldn't need to lock for atomic writes, but do it anyway */
4280 		sdeb_data_sector_lock(sip, do_write);
4281 		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4282 		   fsp + (block * sdebug_sector_size),
4283 		   sdebug_sector_size, sg_skip, do_write);
4284 		sdeb_data_sector_unlock(sip, do_write);
4285 		total += ret;
4286 		if (ret != sdebug_sector_size)
4287 			break;
4288 		sg_skip += sdebug_sector_size;
4289 		if (++block >= sdebug_store_sectors)
4290 			block = 0;
4291 	}
4292 	sdeb_data_unlock(sip, atomic);
4293 
4294 	return total;
4295 }
4296 
4297 /* Returns number of bytes copied or -1 if error. */
4298 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4299 {
4300 	struct scsi_data_buffer *sdb = &scp->sdb;
4301 
4302 	if (!sdb->length)
4303 		return 0;
4304 	if (scp->sc_data_direction != DMA_TO_DEVICE)
4305 		return -1;
4306 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4307 			      num * sdebug_sector_size, 0, true);
4308 }
4309 
4310 /* If the num blocks at sip->storep+lba compare equal to the first num
4311  * blocks of arr, then copy the top half of arr (the next num blocks) into
4312  * sip->storep+lba and return true. If the comparison fails, return false. */
4313 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4314 			      const u8 *arr, bool compare_only)
4315 {
4316 	bool res;
4317 	u64 block, rest = 0;
4318 	u32 store_blks = sdebug_store_sectors;
4319 	u32 lb_size = sdebug_sector_size;
4320 	u8 *fsp = sip->storep;
4321 
4322 	block = do_div(lba, store_blks);
4323 	if (block + num > store_blks)
4324 		rest = block + num - store_blks;
4325 
4326 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4327 	if (!res)
4328 		return res;
4329 	if (rest)
4330 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
4331 			     rest * lb_size);
4332 	if (!res)
4333 		return res;
4334 	if (compare_only)
4335 		return true;
4336 	arr += num * lb_size;
4337 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4338 	if (rest)
4339 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4340 	return res;
4341 }
4342 
4343 static __be16 dif_compute_csum(const void *buf, int len)
4344 {
4345 	__be16 csum;
4346 
4347 	if (sdebug_guard)
4348 		csum = (__force __be16)ip_compute_csum(buf, len);
4349 	else
4350 		csum = cpu_to_be16(crc_t10dif(buf, len));
4351 
4352 	return csum;
4353 }
4354 
4355 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
4356 		      sector_t sector, u32 ei_lba)
4357 {
4358 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
4359 
4360 	if (sdt->guard_tag != csum) {
4361 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
4362 			(unsigned long)sector,
4363 			be16_to_cpu(sdt->guard_tag),
4364 			be16_to_cpu(csum));
4365 		return 0x01;
4366 	}
4367 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
4368 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
4369 		pr_err("REF check failed on sector %lu\n",
4370 			(unsigned long)sector);
4371 		return 0x03;
4372 	}
4373 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4374 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
4375 		pr_err("REF check failed on sector %lu\n",
4376 			(unsigned long)sector);
4377 		return 0x03;
4378 	}
4379 	return 0;
4380 }
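/*
 * For reference, the 8-byte PI tuple checked above has this layout
 * (linux/t10-pi.h):
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	CRC16, or IP checksum if sdebug_guard
 *		__be16 app_tag;		0xffff means "do not check this sector"
 *		__be32 ref_tag;		low 32 bits of LBA (Type 1) or ei_lba (Type 2)
 *	};
 */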
4381 
4382 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
4383 			  unsigned int sectors, bool read)
4384 {
4385 	size_t resid;
4386 	void *paddr;
4387 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4388 						scp->device->hostdata, true);
4389 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
4390 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
4391 	struct sg_mapping_iter miter;
4392 
4393 	/* Bytes of protection data to copy into sgl */
4394 	resid = sectors * sizeof(*dif_storep);
4395 
4396 	sg_miter_start(&miter, scsi_prot_sglist(scp),
4397 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
4398 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
4399 
4400 	while (sg_miter_next(&miter) && resid > 0) {
4401 		size_t len = min_t(size_t, miter.length, resid);
4402 		void *start = dif_store(sip, sector);
4403 		size_t rest = 0;
4404 
4405 		if (dif_store_end < start + len)
4406 			rest = start + len - dif_store_end;
4407 
4408 		paddr = miter.addr;
4409 
4410 		if (read)
4411 			memcpy(paddr, start, len - rest);
4412 		else
4413 			memcpy(start, paddr, len - rest);
4414 
4415 		if (rest) {
4416 			if (read)
4417 				memcpy(paddr + len - rest, dif_storep, rest);
4418 			else
4419 				memcpy(dif_storep, paddr + len - rest, rest);
4420 		}
4421 
4422 		sector += len / sizeof(*dif_storep);
4423 		resid -= len;
4424 	}
4425 	sg_miter_stop(&miter);
4426 }
4427 
4428 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
4429 			    unsigned int sectors, u32 ei_lba)
4430 {
4431 	int ret = 0;
4432 	unsigned int i;
4433 	sector_t sector;
4434 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4435 						scp->device->hostdata, true);
4436 	struct t10_pi_tuple *sdt;
4437 
4438 	for (i = 0; i < sectors; i++, ei_lba++) {
4439 		sector = start_sec + i;
4440 		sdt = dif_store(sip, sector);
4441 
4442 		if (sdt->app_tag == cpu_to_be16(0xffff))
4443 			continue;
4444 
4445 		/*
4446 		 * Because scsi_debug acts as both initiator and
4447 		 * target we proceed to verify the PI even if
4448 		 * RDPROTECT=3. This is done so the "initiator" knows
4449 		 * which type of error to return. Otherwise we would
4450 		 * have to iterate over the PI twice.
4451 		 */
4452 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
4453 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
4454 					 sector, ei_lba);
4455 			if (ret) {
4456 				dif_errors++;
4457 				break;
4458 			}
4459 		}
4460 	}
4461 
4462 	dif_copy_prot(scp, start_sec, sectors, true);
4463 	dix_reads++;
4464 
4465 	return ret;
4466 }
4467 
4468 static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4469 {
4470 	u32 i, num, transfer, size;
4471 	u8 *cmd = scp->cmnd;
4472 	struct scsi_data_buffer *sdb = &scp->sdb;
4473 	int partition = devip->tape_partition;
4474 	u32 pos = devip->tape_location[partition];
4475 	struct tape_block *blp;
4476 	bool fixed, sili;
4477 
4478 	if (cmd[0] != READ_6) { /* Only Read(6) supported */
4479 		mk_sense_invalid_opcode(scp);
4480 		return illegal_condition_result;
4481 	}
4482 	fixed = (cmd[1] & 0x1) != 0;
4483 	sili = (cmd[1] & 0x2) != 0;
4484 	if (fixed && sili) {
4485 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
4486 		return check_condition_result;
4487 	}
4488 
4489 	transfer = get_unaligned_be24(cmd + 2);
4490 	if (fixed) {
4491 		num = transfer;
4492 		size = devip->tape_blksize;
4493 	} else {
4494 		if (transfer < TAPE_MIN_BLKSIZE ||
4495 			transfer > TAPE_MAX_BLKSIZE) {
4496 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4497 			return check_condition_result;
4498 		}
4499 		num = 1;
4500 		size = transfer;
4501 	}
4502 
4503 	for (i = 0, blp = devip->tape_blocks[partition] + pos;
4504 	     i < num && pos < devip->tape_eop[partition];
4505 	     i++, pos++, blp++) {
4506 		devip->tape_location[partition] = pos + 1;
4507 		if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
4508 			mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4509 					FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
4510 					SENSE_FLAG_FILEMARK);
4511 			scsi_set_resid(scp, (num - i) * size);
4512 			return check_condition_result;
4513 		}
4514 		/* Assume no REW */
4515 		if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
4516 			mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
4517 					EOD_DETECTED_ASCQ, fixed ? num - i : size,
4518 					0);
4519 			devip->tape_location[partition] = pos;
4520 			scsi_set_resid(scp, (num - i) * size);
4521 			return check_condition_result;
4522 		}
4523 		sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
4524 			size, i * size);
4525 		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4526 			&(blp->data), 4, i * size, false);
4527 		if (fixed) {
4528 			if (blp->fl_size != devip->tape_blksize) {
4529 				scsi_set_resid(scp, (num - i) * size);
4530 				mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4531 						0, num - i,
4532 						SENSE_FLAG_ILI);
4533 				return check_condition_result;
4534 			}
4535 		} else {
4536 			if (blp->fl_size != size) {
4537 				if (blp->fl_size < size)
4538 					scsi_set_resid(scp, size - blp->fl_size);
4539 				if (!sili) {
4540 					mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4541 							0, size - blp->fl_size,
4542 							SENSE_FLAG_ILI);
4543 					return check_condition_result;
4544 				}
4545 			}
4546 		}
4547 	}
4548 	if (pos >= devip->tape_eop[partition]) {
4549 		mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4550 				EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
4551 				SENSE_FLAG_EOM);
4552 		devip->tape_location[partition] = pos - 1;
4553 		return check_condition_result;
4554 	}
4555 	devip->tape_location[partition] = pos;
4556 
4557 	return 0;
4558 }
4559 
4560 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4561 {
4562 	bool check_prot;
4563 	u32 num;
4564 	u32 ei_lba;
4565 	int ret;
4566 	u64 lba;
4567 	struct sdeb_store_info *sip = devip2sip(devip, true);
4568 	u8 *cmd = scp->cmnd;
4569 	bool meta_data_locked = false;
4570 
4571 	switch (cmd[0]) {
4572 	case READ_16:
4573 		ei_lba = 0;
4574 		lba = get_unaligned_be64(cmd + 2);
4575 		num = get_unaligned_be32(cmd + 10);
4576 		check_prot = true;
4577 		break;
4578 	case READ_10:
4579 		ei_lba = 0;
4580 		lba = get_unaligned_be32(cmd + 2);
4581 		num = get_unaligned_be16(cmd + 7);
4582 		check_prot = true;
4583 		break;
4584 	case READ_6:
4585 		ei_lba = 0;
4586 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4587 		      (u32)(cmd[1] & 0x1f) << 16;
4588 		num = (0 == cmd[4]) ? 256 : cmd[4];
4589 		check_prot = true;
4590 		break;
4591 	case READ_12:
4592 		ei_lba = 0;
4593 		lba = get_unaligned_be32(cmd + 2);
4594 		num = get_unaligned_be32(cmd + 6);
4595 		check_prot = true;
4596 		break;
4597 	case XDWRITEREAD_10:
4598 		ei_lba = 0;
4599 		lba = get_unaligned_be32(cmd + 2);
4600 		num = get_unaligned_be16(cmd + 7);
4601 		check_prot = false;
4602 		break;
4603 	default:	/* assume READ(32) */
4604 		lba = get_unaligned_be64(cmd + 12);
4605 		ei_lba = get_unaligned_be32(cmd + 20);
4606 		num = get_unaligned_be32(cmd + 28);
4607 		check_prot = false;
4608 		break;
4609 	}
4610 	if (unlikely(have_dif_prot && check_prot)) {
4611 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4612 		    (cmd[1] & 0xe0)) {
4613 			mk_sense_invalid_opcode(scp);
4614 			return check_condition_result;
4615 		}
4616 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4617 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4618 		    (cmd[1] & 0xe0) == 0)
4619 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
4620 				    "to DIF device\n");
4621 	}
4622 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
4623 		     atomic_read(&sdeb_inject_pending))) {
4624 		num /= 2;
4625 		atomic_set(&sdeb_inject_pending, 0);
4626 	}
4627 
4628 	/*
4629 	 * When checking device access params, for reads we only check data
4630 	 * versus what is set at init time, so no need to lock.
4631 	 */
4632 	ret = check_device_access_params(scp, lba, num, false);
4633 	if (ret)
4634 		return ret;
4635 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
4636 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
4637 		     ((lba + num) > sdebug_medium_error_start))) {
4638 		/* claim unrecoverable read error */
4639 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
4640 		/* set info field and valid bit for fixed descriptor */
4641 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
4642 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
4643 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
4644 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
4645 			put_unaligned_be32(ret, scp->sense_buffer + 3);
4646 		}
4647 		scsi_set_resid(scp, scsi_bufflen(scp));
4648 		return check_condition_result;
4649 	}
4650 
4651 	if (sdebug_dev_is_zoned(devip) ||
4652 	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
4653 		sdeb_meta_read_lock(sip);
4654 		meta_data_locked = true;
4655 	}
4656 
4657 	/* DIX + T10 DIF */
4658 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4659 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
4660 		case 1: /* Guard tag error */
4661 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4662 				sdeb_meta_read_unlock(sip);
4663 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4664 				return check_condition_result;
4665 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4666 				sdeb_meta_read_unlock(sip);
4667 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4668 				return illegal_condition_result;
4669 			}
4670 			break;
4671 		case 3: /* Reference tag error */
4672 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4673 				sdeb_meta_read_unlock(sip);
4674 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4675 				return check_condition_result;
4676 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4677 				sdeb_meta_read_unlock(sip);
4678 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4679 				return illegal_condition_result;
4680 			}
4681 			break;
4682 		}
4683 	}
4684 
4685 	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4686 	if (meta_data_locked)
4687 		sdeb_meta_read_unlock(sip);
4688 	if (unlikely(ret == -1))
4689 		return DID_ERROR << 16;
4690 
4691 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4692 
4693 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4694 		     atomic_read(&sdeb_inject_pending))) {
4695 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4696 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4697 			atomic_set(&sdeb_inject_pending, 0);
4698 			return check_condition_result;
4699 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4700 			/* Logical block guard check failed */
4701 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4702 			atomic_set(&sdeb_inject_pending, 0);
4703 			return illegal_condition_result;
4704 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4705 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4706 			atomic_set(&sdeb_inject_pending, 0);
4707 			return illegal_condition_result;
4708 		}
4709 	}
4710 	return 0;
4711 }
4712 
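/*
 * Verify the T10 DIF protection information accompanying a write. Each data
 * sector is paired with one 8-byte struct t10_pi_tuple (<linux/t10-pi.h>):
 * a 16-bit guard (CRC), a 16-bit application tag and a 32-bit reference
 * tag. The two sg_mapping_iter cursors below walk the protection and data
 * scatterlists in lockstep, advancing the data cursor one sector per tuple.
 */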
4713 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4714 			     unsigned int sectors, u32 ei_lba)
4715 {
4716 	int ret;
4717 	struct t10_pi_tuple *sdt;
4718 	void *daddr;
4719 	sector_t sector = start_sec;
4720 	int ppage_offset;
4721 	int dpage_offset;
4722 	struct sg_mapping_iter diter;
4723 	struct sg_mapping_iter piter;
4724 
4725 	BUG_ON(scsi_sg_count(SCpnt) == 0);
4726 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4727 
4728 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4729 			scsi_prot_sg_count(SCpnt),
4730 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4731 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4732 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4733 
4734 	/* For each protection page */
4735 	while (sg_miter_next(&piter)) {
4736 		dpage_offset = 0;
4737 		if (WARN_ON(!sg_miter_next(&diter))) {
4738 			ret = 0x01;
4739 			goto out;
4740 		}
4741 
4742 		for (ppage_offset = 0; ppage_offset < piter.length;
4743 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
4744 			/* If we're at the end of the current
4745 			 * data page, advance to the next one
4746 			 */
4747 			if (dpage_offset >= diter.length) {
4748 				if (WARN_ON(!sg_miter_next(&diter))) {
4749 					ret = 0x01;
4750 					goto out;
4751 				}
4752 				dpage_offset = 0;
4753 			}
4754 
4755 			sdt = piter.addr + ppage_offset;
4756 			daddr = diter.addr + dpage_offset;
4757 
4758 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4759 				ret = dif_verify(sdt, daddr, sector, ei_lba);
4760 				if (ret)
4761 					goto out;
4762 			}
4763 
4764 			sector++;
4765 			ei_lba++;
4766 			dpage_offset += sdebug_sector_size;
4767 		}
4768 		diter.consumed = dpage_offset;
4769 		sg_miter_stop(&diter);
4770 	}
4771 	sg_miter_stop(&piter);
4772 
4773 	dif_copy_prot(SCpnt, start_sec, sectors, false);
4774 	dix_writes++;
4775 
4776 	return 0;
4777 
4778 out:
4779 	dif_errors++;
4780 	sg_miter_stop(&diter);
4781 	sg_miter_stop(&piter);
4782 	return ret;
4783 }
4784 
4785 static unsigned long lba_to_map_index(sector_t lba)
4786 {
4787 	if (sdebug_unmap_alignment)
4788 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4789 	sector_div(lba, sdebug_unmap_granularity);
4790 	return lba;
4791 }
4792 
4793 static sector_t map_index_to_lba(unsigned long index)
4794 {
4795 	sector_t lba = index * sdebug_unmap_granularity;
4796 
4797 	if (sdebug_unmap_alignment)
4798 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4799 	return lba;
4800 }
4801 
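/*
 * Worked example for the two helpers above, with values chosen purely for
 * illustration: if sdebug_unmap_granularity = 8 and sdebug_unmap_alignment
 * = 4, then lba_to_map_index(4) = (4 + (8 - 4)) / 8 = 1 and
 * map_index_to_lba(1) = (1 * 8) - (8 - 4) = 4, i.e. map bit 1 covers the
 * 8-LBA unmap unit that starts at LBA 4.
 */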
4802 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4803 			      unsigned int *num)
4804 {
4805 	sector_t end;
4806 	unsigned int mapped;
4807 	unsigned long index;
4808 	unsigned long next;
4809 
4810 	index = lba_to_map_index(lba);
4811 	mapped = test_bit(index, sip->map_storep);
4812 
4813 	if (mapped)
4814 		next = find_next_zero_bit(sip->map_storep, map_size, index);
4815 	else
4816 		next = find_next_bit(sip->map_storep, map_size, index);
4817 
4818 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4819 	*num = end - lba;
4820 	return mapped;
4821 }
4822 
4823 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4824 		       unsigned int len)
4825 {
4826 	sector_t end = lba + len;
4827 
4828 	while (lba < end) {
4829 		unsigned long index = lba_to_map_index(lba);
4830 
4831 		if (index < map_size)
4832 			set_bit(index, sip->map_storep);
4833 
4834 		lba = map_index_to_lba(index + 1);
4835 	}
4836 }
4837 
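/*
 * Clear the map bits of unmap units fully covered by [lba, lba + len) and
 * scrub the backing store so unmapped blocks read back as zeroes when
 * sdebug_lbprz is 1 and as 0xff bytes for other non-zero values (LBPRZ=2),
 * matching the memset below; any PI tuples are invalidated with 0xff.
 */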
4838 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4839 			 unsigned int len)
4840 {
4841 	sector_t end = lba + len;
4842 	u8 *fsp = sip->storep;
4843 
4844 	while (lba < end) {
4845 		unsigned long index = lba_to_map_index(lba);
4846 
4847 		if (lba == map_index_to_lba(index) &&
4848 		    lba + sdebug_unmap_granularity <= end &&
4849 		    index < map_size) {
4850 			clear_bit(index, sip->map_storep);
4851 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
4852 				memset(fsp + lba * sdebug_sector_size,
4853 				       (sdebug_lbprz & 1) ? 0 : 0xff,
4854 				       sdebug_sector_size *
4855 				       sdebug_unmap_granularity);
4856 			}
4857 			if (sip->dif_storep) {
4858 				memset(sip->dif_storep + lba, 0xff,
4859 				       sizeof(*sip->dif_storep) *
4860 				       sdebug_unmap_granularity);
4861 			}
4862 		}
4863 		lba = map_index_to_lba(index + 1);
4864 	}
4865 }
4866 
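/*
 * WRITE for the tape model; only WRITE(6) is accepted. With the FIXED bit
 * set, the 24-bit transfer length counts blocks of the current
 * tape_blksize; with it clear, it is the byte length of one variable-length
 * block and must lie within [TAPE_MIN_BLKSIZE, TAPE_MAX_BLKSIZE].
 */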
4867 static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4868 {
4869 	u32 i, num, transfer, size, written = 0;
4870 	u8 *cmd = scp->cmnd;
4871 	struct scsi_data_buffer *sdb = &scp->sdb;
4872 	int partition = devip->tape_partition;
4873 	int pos = devip->tape_location[partition];
4874 	struct tape_block *blp;
4875 	bool fixed, ew;
4876 
4877 	if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
4878 		mk_sense_invalid_opcode(scp);
4879 		return illegal_condition_result;
4880 	}
4881 
4882 	fixed = (cmd[1] & 1) != 0;
4883 	transfer = get_unaligned_be24(cmd + 2);
4884 	if (fixed) {
4885 		num = transfer;
4886 		size = devip->tape_blksize;
4887 	} else {
4888 		if (transfer < TAPE_MIN_BLKSIZE ||
4889 			transfer > TAPE_MAX_BLKSIZE) {
4890 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4891 			return check_condition_result;
4892 		}
4893 		num = 1;
4894 		size = transfer;
4895 	}
4896 
4897 	scsi_set_resid(scp, num * size);
4898 	for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
4899 	     i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
4900 		blp->fl_size = size;
4901 		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4902 			&(blp->data), 4, i * size, true);
4903 		written += size;
4904 		scsi_set_resid(scp, num * size - written);
4905 		ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
4906 	}
4907 
4908 	devip->tape_location[partition] = pos;
4909 	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
4910 	if (pos >= devip->tape_eop[partition] - 1) {
4911 		mk_sense_info_tape(scp, VOLUME_OVERFLOW,
4912 				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4913 				fixed ? num - i : transfer,
4914 				SENSE_FLAG_EOM);
4915 		return check_condition_result;
4916 	}
4917 	if (ew) { /* early warning */
4918 		mk_sense_info_tape(scp, NO_SENSE,
4919 				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4920 				fixed ? num - i : transfer,
4921 				SENSE_FLAG_EOM);
4922 		return check_condition_result;
4923 	}
4924 
4925 	return 0;
4926 }
4927 
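/*
 * Handler for the WRITE(6/10/12/16/32) family. CDB decoding mirrors
 * resp_read_dt0(), plus extraction of the group number; after the data is
 * written, the provisioning map and any ZBC write pointer are updated under
 * the meta-data lock.
 */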
4928 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4929 {
4930 	bool check_prot;
4931 	u32 num;
4932 	u8 group = 0;
4933 	u32 ei_lba;
4934 	int ret;
4935 	u64 lba;
4936 	struct sdeb_store_info *sip = devip2sip(devip, true);
4937 	u8 *cmd = scp->cmnd;
4938 	bool meta_data_locked = false;
4939 
4940 	switch (cmd[0]) {
4941 	case WRITE_16:
4942 		ei_lba = 0;
4943 		lba = get_unaligned_be64(cmd + 2);
4944 		num = get_unaligned_be32(cmd + 10);
4945 		group = cmd[14] & 0x3f;
4946 		check_prot = true;
4947 		break;
4948 	case WRITE_10:
4949 		ei_lba = 0;
4950 		lba = get_unaligned_be32(cmd + 2);
4951 		group = cmd[6] & 0x3f;
4952 		num = get_unaligned_be16(cmd + 7);
4953 		check_prot = true;
4954 		break;
4955 	case WRITE_6:
4956 		ei_lba = 0;
4957 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4958 		      (u32)(cmd[1] & 0x1f) << 16;
4959 		num = (0 == cmd[4]) ? 256 : cmd[4];
4960 		check_prot = true;
4961 		break;
4962 	case WRITE_12:
4963 		ei_lba = 0;
4964 		lba = get_unaligned_be32(cmd + 2);
4965 		num = get_unaligned_be32(cmd + 6);
4966 		group = cmd[6] & 0x3f;
4967 		check_prot = true;
4968 		break;
4969 	case 0x53:	/* XDWRITEREAD(10) */
4970 		ei_lba = 0;
4971 		lba = get_unaligned_be32(cmd + 2);
4972 		group = cmd[6] & 0x1f;
4973 		num = get_unaligned_be16(cmd + 7);
4974 		check_prot = false;
4975 		break;
4976 	default:	/* assume WRITE(32) */
4977 		group = cmd[6] & 0x3f;
4978 		lba = get_unaligned_be64(cmd + 12);
4979 		ei_lba = get_unaligned_be32(cmd + 20);
4980 		num = get_unaligned_be32(cmd + 28);
4981 		check_prot = false;
4982 		break;
4983 	}
4984 	if (unlikely(have_dif_prot && check_prot)) {
4985 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4986 		    (cmd[1] & 0xe0)) {
4987 			mk_sense_invalid_opcode(scp);
4988 			return check_condition_result;
4989 		}
4990 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4991 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4992 		    (cmd[1] & 0xe0) == 0)
4993 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4994 				    "to DIF device\n");
4995 	}
4996 
4997 	if (sdebug_dev_is_zoned(devip) ||
4998 	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
4999 	    scsi_debug_lbp())  {
5000 		sdeb_meta_write_lock(sip);
5001 		meta_data_locked = true;
5002 	}
5003 
5004 	ret = check_device_access_params(scp, lba, num, true);
5005 	if (ret) {
5006 		if (meta_data_locked)
5007 			sdeb_meta_write_unlock(sip);
5008 		return ret;
5009 	}
5010 
5011 	/* DIX + T10 DIF */
5012 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5013 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
5014 		case 1: /* Guard tag error */
5015 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
5016 				sdeb_meta_write_unlock(sip);
5017 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5018 				return illegal_condition_result;
5019 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5020 				sdeb_meta_write_unlock(sip);
5021 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5022 				return check_condition_result;
5023 			}
5024 			break;
5025 		case 3: /* Reference tag error */
5026 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
5027 				sdeb_meta_write_unlock(sip);
5028 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
5029 				return illegal_condition_result;
5030 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5031 				sdeb_meta_write_unlock(sip);
5032 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
5033 				return check_condition_result;
5034 			}
5035 			break;
5036 		}
5037 	}
5038 
5039 	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
5040 	if (unlikely(scsi_debug_lbp()))
5041 		map_region(sip, lba, num);
5042 
5043 	/* If ZBC zone then bump its write pointer */
5044 	if (sdebug_dev_is_zoned(devip))
5045 		zbc_inc_wp(devip, lba, num);
5046 	if (meta_data_locked)
5047 		sdeb_meta_write_unlock(sip);
5048 
5049 	if (unlikely(-1 == ret))
5050 		return DID_ERROR << 16;
5051 	else if (unlikely(sdebug_verbose &&
5052 			  (ret < (num * sdebug_sector_size))))
5053 		sdev_printk(KERN_INFO, scp->device,
5054 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5055 			    my_name, num * sdebug_sector_size, ret);
5056 
5057 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5058 		     atomic_read(&sdeb_inject_pending))) {
5059 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5060 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5061 			atomic_set(&sdeb_inject_pending, 0);
5062 			return check_condition_result;
5063 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5064 			/* Logical block guard check failed */
5065 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5066 			atomic_set(&sdeb_inject_pending, 0);
5067 			return illegal_condition_result;
5068 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5069 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5070 			atomic_set(&sdeb_inject_pending, 0);
5071 			return illegal_condition_result;
5072 		}
5073 	}
5074 	return 0;
5075 }
5076 
5077 /*
5078  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
5079  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
5080  */
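/*
 * Each 32-byte LBA range descriptor in the data-out buffer is parsed below
 * as: bytes 0..7 LBA (big-endian), bytes 8..11 number of logical blocks
 * and, for the 32-byte CDB variant only, bytes 12..15 the expected initial
 * logical block reference tag.
 */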
5081 static int resp_write_scat(struct scsi_cmnd *scp,
5082 			   struct sdebug_dev_info *devip)
5083 {
5084 	u8 *cmd = scp->cmnd;
5085 	u8 *lrdp = NULL;
5086 	u8 *up;
5087 	struct sdeb_store_info *sip = devip2sip(devip, true);
5088 	u8 wrprotect;
5089 	u16 lbdof, num_lrd, k;
5090 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
5091 	u32 lb_size = sdebug_sector_size;
5092 	u32 ei_lba;
5093 	u64 lba;
5094 	u8 group;
5095 	int ret, res;
5096 	bool is_16;
5097 	static const u32 lrd_size = 32; /* + parameter list header size */
5098 
5099 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
5100 		is_16 = false;
5101 		group = cmd[6] & 0x3f;
5102 		wrprotect = (cmd[10] >> 5) & 0x7;
5103 		lbdof = get_unaligned_be16(cmd + 12);
5104 		num_lrd = get_unaligned_be16(cmd + 16);
5105 		bt_len = get_unaligned_be32(cmd + 28);
5106 	} else {        /* that leaves WRITE SCATTERED(16) */
5107 		is_16 = true;
5108 		wrprotect = (cmd[2] >> 5) & 0x7;
5109 		lbdof = get_unaligned_be16(cmd + 4);
5110 		num_lrd = get_unaligned_be16(cmd + 8);
5111 		bt_len = get_unaligned_be32(cmd + 10);
5112 		group = cmd[14] & 0x3f;
5113 		if (unlikely(have_dif_prot)) {
5114 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5115 			    wrprotect) {
5116 				mk_sense_invalid_opcode(scp);
5117 				return illegal_condition_result;
5118 			}
5119 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5120 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5121 			     wrprotect == 0)
5122 				sdev_printk(KERN_ERR, scp->device,
5123 					    "Unprotected WR to DIF device\n");
5124 		}
5125 	}
5126 	if ((num_lrd == 0) || (bt_len == 0))
5127 		return 0;       /* T10 says these do-nothings are not errors */
5128 	if (lbdof == 0) {
5129 		if (sdebug_verbose)
5130 			sdev_printk(KERN_INFO, scp->device,
5131 				"%s: %s: LB Data Offset field bad\n",
5132 				my_name, __func__);
5133 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5134 		return illegal_condition_result;
5135 	}
5136 	lbdof_blen = lbdof * lb_size;
5137 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
5138 		if (sdebug_verbose)
5139 			sdev_printk(KERN_INFO, scp->device,
5140 				"%s: %s: LBA range descriptors don't fit\n",
5141 				my_name, __func__);
5142 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5143 		return illegal_condition_result;
5144 	}
5145 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
5146 	if (lrdp == NULL)
5147 		return SCSI_MLQUEUE_HOST_BUSY;
5148 	if (sdebug_verbose)
5149 		sdev_printk(KERN_INFO, scp->device,
5150 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
5151 			my_name, __func__, lbdof_blen);
5152 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
5153 	if (res == -1) {
5154 		ret = DID_ERROR << 16;
5155 		goto err_out;
5156 	}
5157 
5158 	/* Just keep it simple and always lock for now */
5159 	sdeb_meta_write_lock(sip);
5160 	sg_off = lbdof_blen;
5161 	/* Spec says Buffer Transfer Length counts the number of LBs in the data-out buffer */
5162 	cum_lb = 0;
5163 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
5164 		lba = get_unaligned_be64(up + 0);
5165 		num = get_unaligned_be32(up + 8);
5166 		if (sdebug_verbose)
5167 			sdev_printk(KERN_INFO, scp->device,
5168 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
5169 				my_name, __func__, k, lba, num, sg_off);
5170 		if (num == 0)
5171 			continue;
5172 		ret = check_device_access_params(scp, lba, num, true);
5173 		if (ret)
5174 			goto err_out_unlock;
5175 		num_by = num * lb_size;
5176 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
5177 
5178 		if ((cum_lb + num) > bt_len) {
5179 			if (sdebug_verbose)
5180 				sdev_printk(KERN_INFO, scp->device,
5181 				    "%s: %s: sum of blocks > data provided\n",
5182 				    my_name, __func__);
5183 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
5184 					0);
5185 			ret = illegal_condition_result;
5186 			goto err_out_unlock;
5187 		}
5188 
5189 		/* DIX + T10 DIF */
5190 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5191 			int prot_ret = prot_verify_write(scp, lba, num,
5192 							 ei_lba);
5193 
5194 			if (prot_ret) {
5195 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
5196 						prot_ret);
5197 				ret = illegal_condition_result;
5198 				goto err_out_unlock;
5199 			}
5200 		}
5201 
5202 		/*
5203 		 * Write each range atomically, staying as close as possible to
5204 		 * the behaviour of pre-atomic writes.
5205 		 */
5206 		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
5207 		/* If ZBC zone then bump its write pointer */
5208 		if (sdebug_dev_is_zoned(devip))
5209 			zbc_inc_wp(devip, lba, num);
5210 		if (unlikely(scsi_debug_lbp()))
5211 			map_region(sip, lba, num);
5212 		if (unlikely(-1 == ret)) {
5213 			ret = DID_ERROR << 16;
5214 			goto err_out_unlock;
5215 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
5216 			sdev_printk(KERN_INFO, scp->device,
5217 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5218 			    my_name, num_by, ret);
5219 
5220 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5221 			     atomic_read(&sdeb_inject_pending))) {
5222 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5223 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5224 				atomic_set(&sdeb_inject_pending, 0);
5225 				ret = check_condition_result;
5226 				goto err_out_unlock;
5227 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5228 				/* Logical block guard check failed */
5229 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5230 				atomic_set(&sdeb_inject_pending, 0);
5231 				ret = illegal_condition_result;
5232 				goto err_out_unlock;
5233 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5234 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5235 				atomic_set(&sdeb_inject_pending, 0);
5236 				ret = illegal_condition_result;
5237 				goto err_out_unlock;
5238 			}
5239 		}
5240 		sg_off += num_by;
5241 		cum_lb += num;
5242 	}
5243 	ret = 0;
5244 err_out_unlock:
5245 	sdeb_meta_write_unlock(sip);
5246 err_out:
5247 	kfree(lrdp);
5248 	return ret;
5249 }
5250 
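/*
 * Common WRITE SAME worker: either unmap the range (UNMAP bit set with
 * logical block provisioning enabled), or materialize one logical block
 * (zeroed for NDOB, otherwise fetched from the data-out buffer) and
 * replicate it across the remaining blocks with memmove().
 */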
5251 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
5252 			   u32 ei_lba, bool unmap, bool ndob)
5253 {
5254 	struct scsi_device *sdp = scp->device;
5255 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5256 	unsigned long long i;
5257 	u64 block, lbaa;
5258 	u32 lb_size = sdebug_sector_size;
5259 	int ret;
5260 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
5261 						scp->device->hostdata, true);
5262 	u8 *fs1p;
5263 	u8 *fsp;
5264 	bool meta_data_locked = false;
5265 
5266 	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
5267 		sdeb_meta_write_lock(sip);
5268 		meta_data_locked = true;
5269 	}
5270 
5271 	ret = check_device_access_params(scp, lba, num, true);
5272 	if (ret)
5273 		goto out;
5274 
5275 	if (unmap && scsi_debug_lbp()) {
5276 		unmap_region(sip, lba, num);
5277 		goto out;
5278 	}
5279 	lbaa = lba;
5280 	block = do_div(lbaa, sdebug_store_sectors);
5281 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
5282 	fsp = sip->storep;
5283 	fs1p = fsp + (block * lb_size);
5284 	sdeb_data_write_lock(sip);
5285 	if (ndob) {
5286 		memset(fs1p, 0, lb_size);
5287 		ret = 0;
5288 	} else
5289 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
5290 
5291 	if (-1 == ret) {
5292 		ret = DID_ERROR << 16;
5293 		goto out;
5294 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
5295 		sdev_printk(KERN_INFO, scp->device,
5296 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
5297 			    my_name, "write same", lb_size, ret);
5298 
5299 	/* Copy first sector to remaining blocks */
5300 	for (i = 1 ; i < num ; i++) {
5301 		lbaa = lba + i;
5302 		block = do_div(lbaa, sdebug_store_sectors);
5303 		memmove(fsp + (block * lb_size), fs1p, lb_size);
5304 	}
5305 	if (scsi_debug_lbp())
5306 		map_region(sip, lba, num);
5307 	/* If ZBC zone then bump its write pointer */
5308 	if (sdebug_dev_is_zoned(devip))
5309 		zbc_inc_wp(devip, lba, num);
5310 	sdeb_data_write_unlock(sip);
5311 	ret = 0;
5312 out:
5313 	if (meta_data_locked)
5314 		sdeb_meta_write_unlock(sip);
5315 	return ret;
5316 }
5317 
5318 static int resp_write_same_10(struct scsi_cmnd *scp,
5319 			      struct sdebug_dev_info *devip)
5320 {
5321 	u8 *cmd = scp->cmnd;
5322 	u32 lba;
5323 	u16 num;
5324 	u32 ei_lba = 0;
5325 	bool unmap = false;
5326 
5327 	if (cmd[1] & 0x8) {
5328 		if (sdebug_lbpws10 == 0) {
5329 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5330 			return check_condition_result;
5331 		} else
5332 			unmap = true;
5333 	}
5334 	lba = get_unaligned_be32(cmd + 2);
5335 	num = get_unaligned_be16(cmd + 7);
5336 	if (num > sdebug_write_same_length) {
5337 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5338 		return check_condition_result;
5339 	}
5340 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5341 }
5342 
5343 static int resp_write_same_16(struct scsi_cmnd *scp,
5344 			      struct sdebug_dev_info *devip)
5345 {
5346 	u8 *cmd = scp->cmnd;
5347 	u64 lba;
5348 	u32 num;
5349 	u32 ei_lba = 0;
5350 	bool unmap = false;
5351 	bool ndob = false;
5352 
5353 	if (cmd[1] & 0x8) {	/* UNMAP */
5354 		if (sdebug_lbpws == 0) {
5355 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5356 			return check_condition_result;
5357 		} else
5358 			unmap = true;
5359 	}
5360 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
5361 		ndob = true;
5362 	lba = get_unaligned_be64(cmd + 2);
5363 	num = get_unaligned_be32(cmd + 10);
5364 	if (num > sdebug_write_same_length) {
5365 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5366 		return check_condition_result;
5367 	}
5368 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5369 }
5370 
5371 /* Note the mode field is in the same position as the (lower) service action
5372  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
5373  * reporting each mode of this command separately; that is left for later. */
5374 static int resp_write_buffer(struct scsi_cmnd *scp,
5375 			     struct sdebug_dev_info *devip)
5376 {
5377 	u8 *cmd = scp->cmnd;
5378 	struct scsi_device *sdp = scp->device;
5379 	struct sdebug_dev_info *dp;
5380 	u8 mode;
5381 
5382 	mode = cmd[1] & 0x1f;
5383 	switch (mode) {
5384 	case 0x4:	/* download microcode (MC) and activate (ACT) */
5385 		/* set UAs on this device only */
5386 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5387 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
5388 		break;
5389 	case 0x5:	/* download MC, save and ACT */
5390 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
5391 		break;
5392 	case 0x6:	/* download MC with offsets and ACT */
5393 		/* set UAs on most devices (LUs) in this target */
5394 		list_for_each_entry(dp,
5395 				    &devip->sdbg_host->dev_info_list,
5396 				    dev_list)
5397 			if (dp->target == sdp->id) {
5398 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
5399 				if (devip != dp)
5400 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
5401 						dp->uas_bm);
5402 			}
5403 		break;
5404 	case 0x7:	/* download MC with offsets, save, and ACT */
5405 		/* set UA on all devices (LUs) in this target */
5406 		list_for_each_entry(dp,
5407 				    &devip->sdbg_host->dev_info_list,
5408 				    dev_list)
5409 			if (dp->target == sdp->id)
5410 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
5411 					dp->uas_bm);
5412 		break;
5413 	default:
5414 		/* do nothing for this command for other mode values */
5415 		break;
5416 	}
5417 	return 0;
5418 }
5419 
5420 static int resp_comp_write(struct scsi_cmnd *scp,
5421 			   struct sdebug_dev_info *devip)
5422 {
5423 	u8 *cmd = scp->cmnd;
5424 	u8 *arr;
5425 	struct sdeb_store_info *sip = devip2sip(devip, true);
5426 	u64 lba;
5427 	u32 dnum;
5428 	u32 lb_size = sdebug_sector_size;
5429 	u8 num;
5430 	int ret;
5431 	int retval = 0;
5432 
5433 	lba = get_unaligned_be64(cmd + 2);
5434 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
5435 	if (0 == num)
5436 		return 0;	/* degenerate case, not an error */
5437 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5438 	    (cmd[1] & 0xe0)) {
5439 		mk_sense_invalid_opcode(scp);
5440 		return check_condition_result;
5441 	}
5442 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5443 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5444 	    (cmd[1] & 0xe0) == 0)
5445 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
5446 			    "to DIF device\n");
5447 	ret = check_device_access_params(scp, lba, num, false);
5448 	if (ret)
5449 		return ret;
5450 	dnum = 2 * num;
5451 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
5452 	if (NULL == arr) {
5453 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5454 				INSUFF_RES_ASCQ);
5455 		return check_condition_result;
5456 	}
5457 
5458 	ret = do_dout_fetch(scp, dnum, arr);
5459 	if (ret == -1) {
5460 		retval = DID_ERROR << 16;
5461 		goto cleanup_free;
5462 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
5463 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
5464 			    "indicated=%u, IO sent=%d bytes\n", my_name,
5465 			    dnum * lb_size, ret);
5466 
5467 	sdeb_data_write_lock(sip);
5468 	sdeb_meta_write_lock(sip);
5469 	if (!comp_write_worker(sip, lba, num, arr, false)) {
5470 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5471 		retval = check_condition_result;
5472 		goto cleanup_unlock;
5473 	}
5474 
5475 	/* Cover sip->map_storep (which map_region() sets) with the data lock */
5476 	if (scsi_debug_lbp())
5477 		map_region(sip, lba, num);
5478 cleanup_unlock:
5479 	sdeb_meta_write_unlock(sip);
5480 	sdeb_data_write_unlock(sip);
5481 cleanup_free:
5482 	kfree(arr);
5483 	return retval;
5484 }
5485 
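/*
 * UNMAP parameter list layout, as checked in resp_unmap() below: an 8-byte
 * header (UNMAP data length in bytes 0..1, block descriptor data length in
 * bytes 2..3) followed by one 16-byte descriptor per LBA range.
 */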
5486 struct unmap_block_desc {
5487 	__be64	lba;
5488 	__be32	blocks;
5489 	__be32	__reserved;
5490 };
5491 
5492 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5493 {
5494 	unsigned char *buf;
5495 	struct unmap_block_desc *desc;
5496 	struct sdeb_store_info *sip = devip2sip(devip, true);
5497 	unsigned int i, payload_len, descriptors;
5498 	int ret;
5499 
5500 	if (!scsi_debug_lbp())
5501 		return 0;	/* fib and say it's done */
5502 	payload_len = get_unaligned_be16(scp->cmnd + 7);
5503 	BUG_ON(scsi_bufflen(scp) != payload_len);
5504 
5505 	descriptors = (payload_len - 8) / 16;
5506 	if (descriptors > sdebug_unmap_max_desc) {
5507 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5508 		return check_condition_result;
5509 	}
5510 
5511 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
5512 	if (!buf) {
5513 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5514 				INSUFF_RES_ASCQ);
5515 		return check_condition_result;
5516 	}
5517 
5518 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
5519 
5520 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
5521 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
5522 
5523 	desc = (void *)&buf[8];
5524 
5525 	sdeb_meta_write_lock(sip);
5526 
5527 	for (i = 0 ; i < descriptors ; i++) {
5528 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
5529 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
5530 
5531 		ret = check_device_access_params(scp, lba, num, true);
5532 		if (ret)
5533 			goto out;
5534 
5535 		unmap_region(sip, lba, num);
5536 	}
5537 
5538 	ret = 0;
5539 
5540 out:
5541 	sdeb_meta_write_unlock(sip);
5542 	kfree(buf);
5543 
5544 	return ret;
5545 }
5546 
5547 #define SDEBUG_GET_LBA_STATUS_LEN 32
5548 
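/*
 * GET LBA STATUS is answered with a single LBA status descriptor holding
 * the starting LBA, the number of contiguous blocks in the same state, and
 * a provisioning status of 0 (mapped) or 1 (deallocated), assembled at the
 * bottom of the function below.
 */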
5549 static int resp_get_lba_status(struct scsi_cmnd *scp,
5550 			       struct sdebug_dev_info *devip)
5551 {
5552 	u8 *cmd = scp->cmnd;
5553 	u64 lba;
5554 	u32 alloc_len, mapped, num;
5555 	int ret;
5556 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5557 
5558 	lba = get_unaligned_be64(cmd + 2);
5559 	alloc_len = get_unaligned_be32(cmd + 10);
5560 
5561 	if (alloc_len < 24)
5562 		return 0;
5563 
5564 	ret = check_device_access_params(scp, lba, 1, false);
5565 	if (ret)
5566 		return ret;
5567 
5568 	if (scsi_debug_lbp()) {
5569 		struct sdeb_store_info *sip = devip2sip(devip, true);
5570 
5571 		mapped = map_state(sip, lba, &num);
5572 	} else {
5573 		mapped = 1;
5574 		/* following just in case virtual_gb changed */
5575 		sdebug_capacity = get_sdebug_capacity();
5576 		if (sdebug_capacity - lba <= 0xffffffff)
5577 			num = sdebug_capacity - lba;
5578 		else
5579 			num = 0xffffffff;
5580 	}
5581 
5582 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5583 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
5584 	put_unaligned_be64(lba, arr + 8);	/* LBA */
5585 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
5586 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
5587 
5588 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5589 }
5590 
5591 static int resp_get_stream_status(struct scsi_cmnd *scp,
5592 				  struct sdebug_dev_info *devip)
5593 {
5594 	u16 starting_stream_id, stream_id;
5595 	const u8 *cmd = scp->cmnd;
5596 	u32 alloc_len, offset;
5597 	u8 arr[256] = {};
5598 	struct scsi_stream_status_header *h = (void *)arr;
5599 
5600 	starting_stream_id = get_unaligned_be16(cmd + 4);
5601 	alloc_len = get_unaligned_be32(cmd + 10);
5602 
5603 	if (alloc_len < 8) {
5604 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5605 		return check_condition_result;
5606 	}
5607 
5608 	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
5609 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
5610 		return check_condition_result;
5611 	}
5612 
5613 	/*
5614 	 * The GET STREAM STATUS command only reports status information
5615 	 * about open streams. Treat the non-permanent streams as open.
5616 	 */
5617 	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
5618 			   &h->number_of_open_streams);
5619 
5620 	for (offset = 8, stream_id = starting_stream_id;
5621 	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
5622 		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
5623 	     offset += 8, stream_id++) {
5624 		struct scsi_stream_status *stream_status = (void *)arr + offset;
5625 
5626 		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
5627 		put_unaligned_be16(stream_id,
5628 				   &stream_status->stream_identifier);
5629 		stream_status->rel_lifetime = stream_id + 1;
5630 	}
5631 	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
5632 
5633 	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
5634 }
5635 
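/*
 * SYNCHRONIZE CACHE(10/16). There is no real cache to flush; when the IMMED
 * bit (byte 1, bit 1) is set, or nothing has been written since the last
 * sync, respond immediately via SDEG_RES_IMMED_MASK, otherwise clear
 * write_since_sync so a delayed completion can be modelled.
 */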
5636 static int resp_sync_cache(struct scsi_cmnd *scp,
5637 			   struct sdebug_dev_info *devip)
5638 {
5639 	int res = 0;
5640 	u64 lba;
5641 	u32 num_blocks;
5642 	u8 *cmd = scp->cmnd;
5643 
5644 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
5645 		lba = get_unaligned_be32(cmd + 2);
5646 		num_blocks = get_unaligned_be16(cmd + 7);
5647 	} else {				/* SYNCHRONIZE_CACHE(16) */
5648 		lba = get_unaligned_be64(cmd + 2);
5649 		num_blocks = get_unaligned_be32(cmd + 10);
5650 	}
5651 	if (lba + num_blocks > sdebug_capacity) {
5652 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5653 		return check_condition_result;
5654 	}
5655 	if (!write_since_sync || (cmd[1] & 0x2))
5656 		res = SDEG_RES_IMMED_MASK;
5657 	else		/* delay if write_since_sync and IMMED clear */
5658 		write_since_sync = false;
5659 	return res;
5660 }
5661 
5662 /*
5663  * Assuming LBA+num_blocks is not out-of-range, this function returns
5664  * CONDITION MET if the specified blocks will fit (or already are) in the
5665  * cache, and GOOD status otherwise. Model a disk with a big cache that
5666  * always yields CONDITION MET, and actually try to bring the range in
5667  * main memory into the cache associated with the CPU(s).
5668  *
5669  * The pcode 0x34 is also used for READ POSITION by tape devices.
5670  */
5671 static int resp_pre_fetch(struct scsi_cmnd *scp,
5672 			  struct sdebug_dev_info *devip)
5673 {
5674 	int res = 0;
5675 	u64 lba;
5676 	u64 block, rest = 0;
5677 	u32 nblks;
5678 	u8 *cmd = scp->cmnd;
5679 	struct sdeb_store_info *sip = devip2sip(devip, true);
5680 	u8 *fsp = sip->storep;
5681 
5682 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
5683 		lba = get_unaligned_be32(cmd + 2);
5684 		nblks = get_unaligned_be16(cmd + 7);
5685 	} else {			/* PRE-FETCH(16) */
5686 		lba = get_unaligned_be64(cmd + 2);
5687 		nblks = get_unaligned_be32(cmd + 10);
5688 	}
5689 	if (lba + nblks > sdebug_capacity) {
5690 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5691 		return check_condition_result;
5692 	}
5693 	if (!fsp)
5694 		goto fini;
5695 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
5696 	block = do_div(lba, sdebug_store_sectors);
5697 	if (block + nblks > sdebug_store_sectors)
5698 		rest = block + nblks - sdebug_store_sectors;
5699 
5700 	/* Try to bring the PRE-FETCH range into CPU's cache */
5701 	sdeb_data_read_lock(sip);
5702 	prefetch_range(fsp + (sdebug_sector_size * block),
5703 		       (nblks - rest) * sdebug_sector_size);
5704 	if (rest)
5705 		prefetch_range(fsp, rest * sdebug_sector_size);
5706 
5707 	sdeb_data_read_unlock(sip);
5708 fini:
5709 	if (cmd[1] & 0x2)
5710 		res = SDEG_RES_IMMED_MASK;
5711 	return res | condition_met_result;
5712 }
5713 
5714 #define RL_BUCKET_ELEMS 8
5715 
5716 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5717  * (W-LUN), the normal Linux scanning logic does not associate it with a
5718  * device (e.g. /dev/sg7). The following magic will make that association:
5719  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5720  * where <n> is a host number. If there are multiple targets in a host then
5721  * the above will associate a W-LUN to each target. To only get a W-LUN
5722  * for target 2, use "echo '- 2 49409' > scan".
5723  */
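/*
 * The response below is built RL_BUCKET_ELEMS (8) entries at a time in a
 * stack buffer and copied out incrementally with p_fill_from_dev_buffer(),
 * relying on the 8-byte REPORT LUNS header having the same size as one
 * struct scsi_lun.
 */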
5724 static int resp_report_luns(struct scsi_cmnd *scp,
5725 			    struct sdebug_dev_info *devip)
5726 {
5727 	unsigned char *cmd = scp->cmnd;
5728 	unsigned int alloc_len;
5729 	unsigned char select_report;
5730 	u64 lun;
5731 	struct scsi_lun *lun_p;
5732 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
5733 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
5734 	unsigned int wlun_cnt;	/* report luns W-LUN count */
5735 	unsigned int tlun_cnt;	/* total LUN count */
5736 	unsigned int rlen;	/* response length (in bytes) */
5737 	int k, j, n, res;
5738 	unsigned int off_rsp = 0;
5739 	const int sz_lun = sizeof(struct scsi_lun);
5740 
5741 	clear_luns_changed_on_target(devip);
5742 
5743 	select_report = cmd[2];
5744 	alloc_len = get_unaligned_be32(cmd + 6);
5745 
5746 	if (alloc_len < 4) {
5747 		pr_err("alloc len too small %d\n", alloc_len);
5748 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5749 		return check_condition_result;
5750 	}
5751 
5752 	switch (select_report) {
5753 	case 0:		/* all LUNs apart from W-LUNs */
5754 		lun_cnt = sdebug_max_luns;
5755 		wlun_cnt = 0;
5756 		break;
5757 	case 1:		/* only W-LUNs */
5758 		lun_cnt = 0;
5759 		wlun_cnt = 1;
5760 		break;
5761 	case 2:		/* all LUNs */
5762 		lun_cnt = sdebug_max_luns;
5763 		wlun_cnt = 1;
5764 		break;
5765 	case 0x10:	/* only administrative LUs */
5766 	case 0x11:	/* see SPC-5 */
5767 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
5768 	default:
5769 		pr_debug("select report invalid %d\n", select_report);
5770 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5771 		return check_condition_result;
5772 	}
5773 
5774 	if (sdebug_no_lun_0 && (lun_cnt > 0))
5775 		--lun_cnt;
5776 
5777 	tlun_cnt = lun_cnt + wlun_cnt;
5778 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
5779 	scsi_set_resid(scp, scsi_bufflen(scp));
5780 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5781 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5782 
5783 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
5784 	lun = sdebug_no_lun_0 ? 1 : 0;
5785 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5786 		memset(arr, 0, sizeof(arr));
5787 		lun_p = (struct scsi_lun *)&arr[0];
5788 		if (k == 0) {
5789 			put_unaligned_be32(rlen, &arr[0]);
5790 			++lun_p;
5791 			j = 1;
5792 		}
5793 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5794 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5795 				break;
5796 			int_to_scsilun(lun++, lun_p);
5797 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5798 				lun_p->scsi_lun[0] |= 0x40;
5799 		}
5800 		if (j < RL_BUCKET_ELEMS)
5801 			break;
5802 		n = j * sz_lun;
5803 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5804 		if (res)
5805 			return res;
5806 		off_rsp += n;
5807 	}
5808 	if (wlun_cnt) {
5809 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5810 		++j;
5811 	}
5812 	if (j > 0)
5813 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
5814 	return res;
5815 }
5816 
5817 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5818 {
5819 	bool is_bytchk3 = false;
5820 	u8 bytchk;
5821 	int ret, j;
5822 	u32 vnum, a_num, off;
5823 	const u32 lb_size = sdebug_sector_size;
5824 	u64 lba;
5825 	u8 *arr;
5826 	u8 *cmd = scp->cmnd;
5827 	struct sdeb_store_info *sip = devip2sip(devip, true);
5828 
5829 	bytchk = (cmd[1] >> 1) & 0x3;
5830 	if (bytchk == 0) {
5831 		return 0;	/* always claim internal verify okay */
5832 	} else if (bytchk == 2) {
5833 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5834 		return check_condition_result;
5835 	} else if (bytchk == 3) {
5836 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
5837 	}
5838 	switch (cmd[0]) {
5839 	case VERIFY_16:
5840 		lba = get_unaligned_be64(cmd + 2);
5841 		vnum = get_unaligned_be32(cmd + 10);
5842 		break;
5843 	case VERIFY:		/* is VERIFY(10) */
5844 		lba = get_unaligned_be32(cmd + 2);
5845 		vnum = get_unaligned_be16(cmd + 7);
5846 		break;
5847 	default:
5848 		mk_sense_invalid_opcode(scp);
5849 		return check_condition_result;
5850 	}
5851 	if (vnum == 0)
5852 		return 0;	/* not an error */
5853 	a_num = is_bytchk3 ? 1 : vnum;
5854 	/* Treat following check like one for read (i.e. no write) access */
5855 	ret = check_device_access_params(scp, lba, a_num, false);
5856 	if (ret)
5857 		return ret;
5858 
5859 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5860 	if (!arr) {
5861 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5862 				INSUFF_RES_ASCQ);
5863 		return check_condition_result;
5864 	}
5865 	/* Not changing store, so only need read access */
5866 	sdeb_data_read_lock(sip);
5867 
5868 	ret = do_dout_fetch(scp, a_num, arr);
5869 	if (ret == -1) {
5870 		ret = DID_ERROR << 16;
5871 		goto cleanup;
5872 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5873 		sdev_printk(KERN_INFO, scp->device,
5874 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5875 			    my_name, __func__, a_num * lb_size, ret);
5876 	}
5877 	if (is_bytchk3) {
5878 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5879 			memcpy(arr + off, arr, lb_size);
5880 	}
5881 	ret = 0;
5882 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5883 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5884 		ret = check_condition_result;
5885 		goto cleanup;
5886 	}
5887 cleanup:
5888 	sdeb_data_read_unlock(sip);
5889 	kfree(arr);
5890 	return ret;
5891 }
5892 
5893 #define RZONES_DESC_HD 64
5894 
5895 /* Report zones depending on start LBA and reporting options */
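/*
 * The response starts with a 64-byte header (zone list length, maximum LBA
 * and, when the zone capacity is smaller than the zone size, the zone
 * starting LBA granularity) followed by one 64-byte descriptor per reported
 * zone; RZONES_DESC_HD is that common 64-byte unit.
 */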
5896 static int resp_report_zones(struct scsi_cmnd *scp,
5897 			     struct sdebug_dev_info *devip)
5898 {
5899 	unsigned int rep_max_zones, nrz = 0;
5900 	int ret = 0;
5901 	u32 alloc_len, rep_opts, rep_len;
5902 	bool partial;
5903 	u64 lba, zs_lba;
5904 	u8 *arr = NULL, *desc;
5905 	u8 *cmd = scp->cmnd;
5906 	struct sdeb_zone_state *zsp = NULL;
5907 	struct sdeb_store_info *sip = devip2sip(devip, false);
5908 
5909 	if (!sdebug_dev_is_zoned(devip)) {
5910 		mk_sense_invalid_opcode(scp);
5911 		return check_condition_result;
5912 	}
5913 	zs_lba = get_unaligned_be64(cmd + 2);
5914 	alloc_len = get_unaligned_be32(cmd + 10);
5915 	if (alloc_len == 0)
5916 		return 0;	/* not an error */
5917 	rep_opts = cmd[14] & 0x3f;
5918 	partial = cmd[14] & 0x80;
5919 
5920 	if (zs_lba >= sdebug_capacity) {
5921 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5922 		return check_condition_result;
5923 	}
5924 
5925 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5926 
5927 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5928 	if (!arr) {
5929 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5930 				INSUFF_RES_ASCQ);
5931 		return check_condition_result;
5932 	}
5933 
5934 	sdeb_meta_read_lock(sip);
5935 
5936 	desc = arr + 64;
5937 	for (lba = zs_lba; lba < sdebug_capacity;
5938 	     lba = zsp->z_start + zsp->z_size) {
5939 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5940 			break;
5941 		zsp = zbc_zone(devip, lba);
5942 		switch (rep_opts) {
5943 		case 0x00:
5944 			/* All zones */
5945 			break;
5946 		case 0x01:
5947 			/* Empty zones */
5948 			if (zsp->z_cond != ZC1_EMPTY)
5949 				continue;
5950 			break;
5951 		case 0x02:
5952 			/* Implicit open zones */
5953 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5954 				continue;
5955 			break;
5956 		case 0x03:
5957 			/* Explicit open zones */
5958 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5959 				continue;
5960 			break;
5961 		case 0x04:
5962 			/* Closed zones */
5963 			if (zsp->z_cond != ZC4_CLOSED)
5964 				continue;
5965 			break;
5966 		case 0x05:
5967 			/* Full zones */
5968 			if (zsp->z_cond != ZC5_FULL)
5969 				continue;
5970 			break;
5971 		case 0x06:
5972 		case 0x07:
5973 		case 0x10:
5974 			/*
5975 			 * Read-only, offline, reset WP recommended are
5976 			 * not emulated: no zones to report.
5977 			 */
5978 			continue;
5979 		case 0x11:
5980 			/* non-seq-resource set */
5981 			if (!zsp->z_non_seq_resource)
5982 				continue;
5983 			break;
5984 		case 0x3e:
5985 			/* All zones except gap zones. */
5986 			if (zbc_zone_is_gap(zsp))
5987 				continue;
5988 			break;
5989 		case 0x3f:
5990 			/* Not write pointer (conventional) zones */
5991 			if (zbc_zone_is_seq(zsp))
5992 				continue;
5993 			break;
5994 		default:
5995 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
5996 					INVALID_FIELD_IN_CDB, 0);
5997 			ret = check_condition_result;
5998 			goto fini;
5999 		}
6000 
6001 		if (nrz < rep_max_zones) {
6002 			/* Fill zone descriptor */
6003 			desc[0] = zsp->z_type;
6004 			desc[1] = zsp->z_cond << 4;
6005 			if (zsp->z_non_seq_resource)
6006 				desc[1] |= 1 << 1;
6007 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
6008 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
6009 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
6010 			desc += 64;
6011 		}
6012 
6013 		if (partial && nrz >= rep_max_zones)
6014 			break;
6015 
6016 		nrz++;
6017 	}
6018 
6019 	/* Report header */
6020 	/* Zone list length. */
6021 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
6022 	/* Maximum LBA */
6023 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
6024 	/* Zone starting LBA granularity. */
6025 	if (devip->zcap < devip->zsize)
6026 		put_unaligned_be64(devip->zsize, arr + 16);
6027 
6028 	rep_len = (unsigned long)desc - (unsigned long)arr;
6029 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
6030 
6031 fini:
6032 	sdeb_meta_read_unlock(sip);
6033 	kfree(arr);
6034 	return ret;
6035 }
6036 
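/*
 * WRITE ATOMIC(16): check the LBA against sdebug_atomic_wr_align, the
 * transfer length against sdebug_atomic_wr_gran and against the
 * boundary-dependent length limits, then perform the whole transfer through
 * the atomic flavour of do_device_access().
 */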
6037 static int resp_atomic_write(struct scsi_cmnd *scp,
6038 			     struct sdebug_dev_info *devip)
6039 {
6040 	struct sdeb_store_info *sip;
6041 	u8 *cmd = scp->cmnd;
6042 	u16 boundary, len;
6043 	u64 lba, lba_tmp;
6044 	int ret;
6045 
6046 	if (!scsi_debug_atomic_write()) {
6047 		mk_sense_invalid_opcode(scp);
6048 		return check_condition_result;
6049 	}
6050 
6051 	sip = devip2sip(devip, true);
6052 
6053 	lba = get_unaligned_be64(cmd + 2);
6054 	boundary = get_unaligned_be16(cmd + 10);
6055 	len = get_unaligned_be16(cmd + 12);
6056 
6057 	lba_tmp = lba;
6058 	if (sdebug_atomic_wr_align &&
6059 	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
6060 		/* Does not meet alignment requirement */
6061 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6062 		return check_condition_result;
6063 	}
6064 
6065 	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
6066 		/* Does not meet granularity requirement */
6067 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6068 		return check_condition_result;
6069 	}
6070 
6071 	if (boundary > 0) {
6072 		if (boundary > sdebug_atomic_wr_max_bndry) {
6073 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6074 			return check_condition_result;
6075 		}
6076 
6077 		if (len > sdebug_atomic_wr_max_length_bndry) {
6078 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6079 			return check_condition_result;
6080 		}
6081 	} else {
6082 		if (len > sdebug_atomic_wr_max_length) {
6083 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6084 			return check_condition_result;
6085 		}
6086 	}
6087 
6088 	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
6089 	if (unlikely(ret == -1))
6090 		return DID_ERROR << 16;
6091 	if (unlikely(ret != len * sdebug_sector_size))
6092 		return DID_ERROR << 16;
6093 	return 0;
6094 }
6095 
6096 /* Logic transplanted from tcmu-runner, file_zbc.c */
6097 static void zbc_open_all(struct sdebug_dev_info *devip)
6098 {
6099 	struct sdeb_zone_state *zsp = &devip->zstate[0];
6100 	unsigned int i;
6101 
6102 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
6103 		if (zsp->z_cond == ZC4_CLOSED)
6104 			zbc_open_zone(devip, &devip->zstate[i], true);
6105 	}
6106 }
6107 
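/*
 * OPEN ZONE: with the ALL bit (cmd[14] & 0x01) set, explicitly open every
 * closed zone, provided doing so cannot exceed max_open; otherwise open the
 * single zone whose start LBA is given in the CDB, which must be the start
 * of a sequential (non-conventional) zone.
 */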
6108 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6109 {
6110 	int res = 0;
6111 	u64 z_id;
6112 	enum sdebug_z_cond zc;
6113 	u8 *cmd = scp->cmnd;
6114 	struct sdeb_zone_state *zsp;
6115 	bool all = cmd[14] & 0x01;
6116 	struct sdeb_store_info *sip = devip2sip(devip, false);
6117 
6118 	if (!sdebug_dev_is_zoned(devip)) {
6119 		mk_sense_invalid_opcode(scp);
6120 		return check_condition_result;
6121 	}
6122 	sdeb_meta_write_lock(sip);
6123 
6124 	if (all) {
6125 		/* Check if all closed zones can be opened */
6126 		if (devip->max_open &&
6127 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
6128 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6129 					INSUFF_ZONE_ASCQ);
6130 			res = check_condition_result;
6131 			goto fini;
6132 		}
6133 		/* Open all closed zones */
6134 		zbc_open_all(devip);
6135 		goto fini;
6136 	}
6137 
6138 	/* Open the specified zone */
6139 	z_id = get_unaligned_be64(cmd + 2);
6140 	if (z_id >= sdebug_capacity) {
6141 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6142 		res = check_condition_result;
6143 		goto fini;
6144 	}
6145 
6146 	zsp = zbc_zone(devip, z_id);
6147 	if (z_id != zsp->z_start) {
6148 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6149 		res = check_condition_result;
6150 		goto fini;
6151 	}
6152 	if (zbc_zone_is_conv(zsp)) {
6153 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6154 		res = check_condition_result;
6155 		goto fini;
6156 	}
6157 
6158 	zc = zsp->z_cond;
6159 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
6160 		goto fini;
6161 
6162 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
6163 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6164 				INSUFF_ZONE_ASCQ);
6165 		res = check_condition_result;
6166 		goto fini;
6167 	}
6168 
6169 	zbc_open_zone(devip, zsp, true);
6170 fini:
6171 	sdeb_meta_write_unlock(sip);
6172 	return res;
6173 }
6174 
6175 static void zbc_close_all(struct sdebug_dev_info *devip)
6176 {
6177 	unsigned int i;
6178 
6179 	for (i = 0; i < devip->nr_zones; i++)
6180 		zbc_close_zone(devip, &devip->zstate[i]);
6181 }
6182 
6183 static int resp_close_zone(struct scsi_cmnd *scp,
6184 			   struct sdebug_dev_info *devip)
6185 {
6186 	int res = 0;
6187 	u64 z_id;
6188 	u8 *cmd = scp->cmnd;
6189 	struct sdeb_zone_state *zsp;
6190 	bool all = cmd[14] & 0x01;
6191 	struct sdeb_store_info *sip = devip2sip(devip, false);
6192 
6193 	if (!sdebug_dev_is_zoned(devip)) {
6194 		mk_sense_invalid_opcode(scp);
6195 		return check_condition_result;
6196 	}
6197 
6198 	sdeb_meta_write_lock(sip);
6199 
6200 	if (all) {
6201 		zbc_close_all(devip);
6202 		goto fini;
6203 	}
6204 
6205 	/* Close specified zone */
6206 	z_id = get_unaligned_be64(cmd + 2);
6207 	if (z_id >= sdebug_capacity) {
6208 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6209 		res = check_condition_result;
6210 		goto fini;
6211 	}
6212 
6213 	zsp = zbc_zone(devip, z_id);
6214 	if (z_id != zsp->z_start) {
6215 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6216 		res = check_condition_result;
6217 		goto fini;
6218 	}
6219 	if (zbc_zone_is_conv(zsp)) {
6220 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6221 		res = check_condition_result;
6222 		goto fini;
6223 	}
6224 
6225 	zbc_close_zone(devip, zsp);
6226 fini:
6227 	sdeb_meta_write_unlock(sip);
6228 	return res;
6229 }
6230 
6231 static void zbc_finish_zone(struct sdebug_dev_info *devip,
6232 			    struct sdeb_zone_state *zsp, bool empty)
6233 {
6234 	enum sdebug_z_cond zc = zsp->z_cond;
6235 
6236 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
6237 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
6238 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6239 			zbc_close_zone(devip, zsp);
6240 		if (zsp->z_cond == ZC4_CLOSED)
6241 			devip->nr_closed--;
6242 		zsp->z_wp = zsp->z_start + zsp->z_size;
6243 		zsp->z_cond = ZC5_FULL;
6244 	}
6245 }
6246 
6247 static void zbc_finish_all(struct sdebug_dev_info *devip)
6248 {
6249 	unsigned int i;
6250 
6251 	for (i = 0; i < devip->nr_zones; i++)
6252 		zbc_finish_zone(devip, &devip->zstate[i], false);
6253 }
6254 
6255 static int resp_finish_zone(struct scsi_cmnd *scp,
6256 			    struct sdebug_dev_info *devip)
6257 {
6258 	struct sdeb_zone_state *zsp;
6259 	int res = 0;
6260 	u64 z_id;
6261 	u8 *cmd = scp->cmnd;
6262 	bool all = cmd[14] & 0x01;
6263 	struct sdeb_store_info *sip = devip2sip(devip, false);
6264 
6265 	if (!sdebug_dev_is_zoned(devip)) {
6266 		mk_sense_invalid_opcode(scp);
6267 		return check_condition_result;
6268 	}
6269 
6270 	sdeb_meta_write_lock(sip);
6271 
6272 	if (all) {
6273 		zbc_finish_all(devip);
6274 		goto fini;
6275 	}
6276 
6277 	/* Finish the specified zone */
6278 	z_id = get_unaligned_be64(cmd + 2);
6279 	if (z_id >= sdebug_capacity) {
6280 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6281 		res = check_condition_result;
6282 		goto fini;
6283 	}
6284 
6285 	zsp = zbc_zone(devip, z_id);
6286 	if (z_id != zsp->z_start) {
6287 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6288 		res = check_condition_result;
6289 		goto fini;
6290 	}
6291 	if (zbc_zone_is_conv(zsp)) {
6292 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6293 		res = check_condition_result;
6294 		goto fini;
6295 	}
6296 
6297 	zbc_finish_zone(devip, zsp, true);
6298 fini:
6299 	sdeb_meta_write_unlock(sip);
6300 	return res;
6301 }
6302 
6303 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
6304 			 struct sdeb_zone_state *zsp)
6305 {
6306 	enum sdebug_z_cond zc;
6307 	struct sdeb_store_info *sip = devip2sip(devip, false);
6308 
6309 	if (!zbc_zone_is_seq(zsp))
6310 		return;
6311 
6312 	zc = zsp->z_cond;
6313 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6314 		zbc_close_zone(devip, zsp);
6315 
6316 	if (zsp->z_cond == ZC4_CLOSED)
6317 		devip->nr_closed--;
6318 
6319 	if (zsp->z_wp > zsp->z_start)
6320 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
6321 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
6322 
6323 	zsp->z_non_seq_resource = false;
6324 	zsp->z_wp = zsp->z_start;
6325 	zsp->z_cond = ZC1_EMPTY;
6326 }
6327 
6328 static void zbc_rwp_all(struct sdebug_dev_info *devip)
6329 {
6330 	unsigned int i;
6331 
6332 	for (i = 0; i < devip->nr_zones; i++)
6333 		zbc_rwp_zone(devip, &devip->zstate[i]);
6334 }
6335 
6336 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6337 {
6338 	struct sdeb_zone_state *zsp;
6339 	int res = 0;
6340 	u64 z_id;
6341 	u8 *cmd = scp->cmnd;
6342 	bool all = cmd[14] & 0x01;
6343 	struct sdeb_store_info *sip = devip2sip(devip, false);
6344 
6345 	if (!sdebug_dev_is_zoned(devip)) {
6346 		mk_sense_invalid_opcode(scp);
6347 		return check_condition_result;
6348 	}
6349 
6350 	sdeb_meta_write_lock(sip);
6351 
6352 	if (all) {
6353 		zbc_rwp_all(devip);
6354 		goto fini;
6355 	}
6356 
6357 	z_id = get_unaligned_be64(cmd + 2);
6358 	if (z_id >= sdebug_capacity) {
6359 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6360 		res = check_condition_result;
6361 		goto fini;
6362 	}
6363 
6364 	zsp = zbc_zone(devip, z_id);
6365 	if (z_id != zsp->z_start) {
6366 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6367 		res = check_condition_result;
6368 		goto fini;
6369 	}
6370 	if (zbc_zone_is_conv(zsp)) {
6371 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6372 		res = check_condition_result;
6373 		goto fini;
6374 	}
6375 
6376 	zbc_rwp_zone(devip, zsp);
6377 fini:
6378 	sdeb_meta_write_unlock(sip);
6379 	return res;
6380 }
6381 
6382 static u32 get_tag(struct scsi_cmnd *cmnd)
6383 {
6384 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
6385 }
6386 
6387 /* Queued (deferred) command completions converge here. */
6388 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
6389 {
6390 	struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
6391 					typeof(*sdsc), sd_dp);
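	/* scsi_cmd_priv() returns cmd + 1, i.e. the scsi_cmnd immediately
	 * precedes its driver-private data in the blk-mq PDU; hence the
	 * "sdsc - 1" pointer arithmetic below. */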
6392 	struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
6393 	unsigned long flags;
6394 	bool aborted;
6395 
6396 	if (sdebug_statistics) {
6397 		atomic_inc(&sdebug_completions);
6398 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
6399 			atomic_inc(&sdebug_miss_cpus);
6400 	}
6401 
6402 	if (!scp) {
6403 		pr_err("scmd=NULL\n");
6404 		return;
6405 	}
6406 
6407 	spin_lock_irqsave(&sdsc->lock, flags);
6408 	aborted = sd_dp->aborted;
6409 	if (unlikely(aborted))
6410 		sd_dp->aborted = false;
6411 
6412 	spin_unlock_irqrestore(&sdsc->lock, flags);
6413 
6414 	if (aborted) {
6415 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
6416 		blk_abort_request(scsi_cmd_to_rq(scp));
6417 		return;
6418 	}
6419 
6420 	scsi_done(scp); /* callback to mid level */
6421 }
6422 
6423 /* When high resolution timer goes off this function is called. */
6424 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
6425 {
6426 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
6427 						  hrt);
6428 	sdebug_q_cmd_complete(sd_dp);
6429 	return HRTIMER_NORESTART;
6430 }
6431 
6432 /* When work queue schedules work, it calls this function. */
6433 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
6434 {
6435 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
6436 						  ew.work);
6437 	sdebug_q_cmd_complete(sd_dp);
6438 }
6439 
6440 static bool got_shared_uuid;
6441 static uuid_t shared_uuid;
6442 
6443 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
6444 {
6445 	struct sdeb_zone_state *zsp;
6446 	sector_t capacity = get_sdebug_capacity();
6447 	sector_t conv_capacity;
6448 	sector_t zstart = 0;
6449 	unsigned int i;
6450 
6451 	/*
6452 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
6453 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
6454 	 * use the specified zone size checking that at least 2 zones can be
6455 	 * created for the device.
6456 	 */
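	/*
	 * Worked example (a sketch, assuming 512-byte sectors and a default
	 * zone size of 128 MiB): zsize = (128 * 2^20) >> 9 = 262144 sectors,
	 * so for any store smaller than 4 * 128 MiB, zsize is halved until
	 * at least 4 zones fit.
	 */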
6457 	if (!sdeb_zbc_zone_size_mb) {
6458 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
6459 			>> ilog2(sdebug_sector_size);
6460 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
6461 			devip->zsize >>= 1;
6462 		if (devip->zsize < 2) {
6463 			pr_err("Device capacity too small\n");
6464 			return -EINVAL;
6465 		}
6466 	} else {
6467 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
6468 			pr_err("Zone size is not a power of 2\n");
6469 			return -EINVAL;
6470 		}
6471 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
6472 			>> ilog2(sdebug_sector_size);
6473 		if (devip->zsize >= capacity) {
6474 			pr_err("Zone size too large for device capacity\n");
6475 			return -EINVAL;
6476 		}
6477 	}
6478 
6479 	devip->zsize_shift = ilog2(devip->zsize);
6480 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
6481 
6482 	if (sdeb_zbc_zone_cap_mb == 0) {
6483 		devip->zcap = devip->zsize;
6484 	} else {
6485 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
6486 			      ilog2(sdebug_sector_size);
6487 		if (devip->zcap > devip->zsize) {
6488 			pr_err("Zone capacity too large\n");
6489 			return -EINVAL;
6490 		}
6491 	}
6492 
6493 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
6494 	if (conv_capacity >= capacity) {
6495 		pr_err("Number of conventional zones too large\n");
6496 		return -EINVAL;
6497 	}
6498 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
6499 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
6500 			      devip->zsize_shift;
6501 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
6502 
6503 	/* Add gap zones if zone capacity is smaller than the zone size */
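	/* (each sequential zone is then followed by one gap zone) */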
6504 	if (devip->zcap < devip->zsize)
6505 		devip->nr_zones += devip->nr_seq_zones;
6506 
6507 	if (devip->zoned) {
6508 		/* zbc_max_open_zones can be 0, meaning "not reported" */
6509 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
6510 			devip->max_open = (devip->nr_zones - 1) / 2;
6511 		else
6512 			devip->max_open = sdeb_zbc_max_open;
6513 	}
6514 
6515 	devip->zstate = kcalloc(devip->nr_zones,
6516 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
6517 	if (!devip->zstate)
6518 		return -ENOMEM;
6519 
6520 	for (i = 0; i < devip->nr_zones; i++) {
6521 		zsp = &devip->zstate[i];
6522 
6523 		zsp->z_start = zstart;
6524 
6525 		if (i < devip->nr_conv_zones) {
6526 			zsp->z_type = ZBC_ZTYPE_CNV;
6527 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6528 			zsp->z_wp = (sector_t)-1;
6529 			zsp->z_size =
6530 				min_t(u64, devip->zsize, capacity - zstart);
6531 		} else if ((zstart & (devip->zsize - 1)) == 0) {
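			/* zstart is aligned to the zone size (a power of 2),
			 * so this is the start of a sequential zone */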
6532 			if (devip->zoned)
6533 				zsp->z_type = ZBC_ZTYPE_SWR;
6534 			else
6535 				zsp->z_type = ZBC_ZTYPE_SWP;
6536 			zsp->z_cond = ZC1_EMPTY;
6537 			zsp->z_wp = zsp->z_start;
6538 			zsp->z_size =
6539 				min_t(u64, devip->zcap, capacity - zstart);
6540 		} else {
6541 			zsp->z_type = ZBC_ZTYPE_GAP;
6542 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6543 			zsp->z_wp = (sector_t)-1;
6544 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
6545 					    capacity - zstart);
6546 		}
6547 
6548 		WARN_ON_ONCE((int)zsp->z_size <= 0);
6549 		zstart += zsp->z_size;
6550 	}
6551 
6552 	return 0;
6553 }
6554 
6555 static struct sdebug_dev_info *sdebug_device_create(
6556 			struct sdebug_host_info *sdbg_host, gfp_t flags)
6557 {
6558 	struct sdebug_dev_info *devip;
6559 
6560 	devip = kzalloc(sizeof(*devip), flags);
6561 	if (devip) {
6562 		if (sdebug_uuid_ctl == 1)
6563 			uuid_gen(&devip->lu_name);
6564 		else if (sdebug_uuid_ctl == 2) {
6565 			if (got_shared_uuid)
6566 				devip->lu_name = shared_uuid;
6567 			else {
6568 				uuid_gen(&shared_uuid);
6569 				got_shared_uuid = true;
6570 				devip->lu_name = shared_uuid;
6571 			}
6572 		}
6573 		devip->sdbg_host = sdbg_host;
6574 		if (sdeb_zbc_in_use) {
6575 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
6576 			if (sdebug_device_create_zones(devip)) {
6577 				kfree(devip);
6578 				return NULL;
6579 			}
6580 		} else {
6581 			devip->zoned = false;
6582 		}
6583 		if (sdebug_ptype == TYPE_TAPE) {
6584 			devip->tape_density = TAPE_DEF_DENSITY;
6585 			devip->tape_blksize = TAPE_DEF_BLKSIZE;
6586 		}
6587 		devip->create_ts = ktime_get_boottime();
6588 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
6589 		spin_lock_init(&devip->list_lock);
6590 		INIT_LIST_HEAD(&devip->inject_err_list);
6591 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
6592 	}
6593 	return devip;
6594 }
6595 
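/* Return the devip matching sdev's <channel, target, lun>, else re-use an
 * unused slot or create a new one. Returns NULL if out of memory. */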
6596 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6597 {
6598 	struct sdebug_host_info *sdbg_host;
6599 	struct sdebug_dev_info *open_devip = NULL;
6600 	struct sdebug_dev_info *devip;
6601 
6602 	sdbg_host = shost_to_sdebug_host(sdev->host);
6603 
6604 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6605 		if ((devip->used) && (devip->channel == sdev->channel) &&
6606 		    (devip->target == sdev->id) &&
6607 		    (devip->lun == sdev->lun))
6608 			return devip;
6609 		else {
6610 			if ((!devip->used) && (!open_devip))
6611 				open_devip = devip;
6612 		}
6613 	}
6614 	if (!open_devip) { /* try and make a new one */
6615 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6616 		if (!open_devip) {
6617 			pr_err("out of memory at line %d\n", __LINE__);
6618 			return NULL;
6619 		}
6620 	}
6621 
6622 	open_devip->channel = sdev->channel;
6623 	open_devip->target = sdev->id;
6624 	open_devip->lun = sdev->lun;
6625 	open_devip->sdbg_host = sdbg_host;
6626 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6627 	open_devip->used = true;
6628 	return open_devip;
6629 }
6630 
6631 static int scsi_debug_sdev_init(struct scsi_device *sdp)
6632 {
6633 	if (sdebug_verbose)
6634 		pr_info("sdev_init <%u %u %u %llu>\n",
6635 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6636 
6637 	return 0;
6638 }
6639 
6640 static int scsi_debug_sdev_configure(struct scsi_device *sdp,
6641 				     struct queue_limits *lim)
6642 {
6643 	struct sdebug_dev_info *devip =
6644 			(struct sdebug_dev_info *)sdp->hostdata;
6645 	struct dentry *dentry;
6646 
6647 	if (sdebug_verbose)
6648 		pr_info("sdev_configure <%u %u %u %llu>\n",
6649 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6650 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
6651 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
6652 	if (devip == NULL) {
6653 		devip = find_build_dev_info(sdp);
6654 		if (devip == NULL)
6655 			return 1;  /* no resources, will be marked offline */
6656 	}
6657 	if (sdebug_ptype == TYPE_TAPE) {
6658 		if (!devip->tape_blocks[0]) {
6659 			devip->tape_blocks[0] =
6660 				kcalloc(TAPE_UNITS, sizeof(struct tape_block),
6661 					GFP_KERNEL);
6662 			if (!devip->tape_blocks[0])
6663 				return 1;
6664 		}
6665 		devip->tape_pending_nbr_partitions = -1;
6666 		if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
6667 			kfree(devip->tape_blocks[0]);
6668 			devip->tape_blocks[0] = NULL;
6669 			return 1;
6670 		}
6671 	}
6672 	sdp->hostdata = devip;
6673 	if (sdebug_no_uld)
6674 		sdp->no_uld_attach = 1;
6675 	config_cdb_len(sdp);
6676 
6677 	if (sdebug_allow_restart)
6678 		sdp->allow_restart = 1;
6679 
6680 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
6681 				sdebug_debugfs_root);
6682 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
6683 		pr_info("%s: failed to create debugfs directory for device %s\n",
6684 			__func__, dev_name(&sdp->sdev_gendev));
6685 
6686 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
6687 				&sdebug_error_fops);
6688 	if (IS_ERR_OR_NULL(dentry))
6689 		pr_info("%s: failed to create error file for device %s\n",
6690 			__func__, dev_name(&sdp->sdev_gendev));
6691 
6692 	return 0;
6693 }
6694 
6695 static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
6696 {
6697 	struct sdebug_dev_info *devip =
6698 		(struct sdebug_dev_info *)sdp->hostdata;
6699 	struct sdebug_err_inject *err;
6700 
6701 	if (sdebug_verbose)
6702 		pr_info("sdev_destroy <%u %u %u %llu>\n",
6703 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6704 
6705 	if (!devip)
6706 		return;
6707 
6708 	spin_lock(&devip->list_lock);
6709 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6710 		list_del_rcu(&err->list);
6711 		call_rcu(&err->rcu, sdebug_err_free);
6712 	}
6713 	spin_unlock(&devip->list_lock);
6714 
6715 	debugfs_remove(devip->debugfs_entry);
6716 
6717 	if (sdp->type == TYPE_TAPE) {
6718 		kfree(devip->tape_blocks[0]);
6719 		devip->tape_blocks[0] = NULL;
6720 	}
6721 
6722 	/* make this slot available for re-use */
6723 	devip->used = false;
6724 	sdp->hostdata = NULL;
6725 }
6726 
6727 /* Returns true if cancelled or not running callback. */
6728 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
6729 {
6730 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6731 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
6732 	enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
6733 
6734 	lockdep_assert_held(&sdsc->lock);
6735 
6736 	if (defer_t == SDEB_DEFER_HRT) {
6737 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
6738 
6739 		switch (res) {
6740 		case -1: /* -1: the hrtimer callback is currently executing */
6741 			return false;
6742 		case 0: /* Not active, it must have already run */
6743 		case 1: /* Was active, we've now cancelled */
6744 		default:
6745 			return true;
6746 		}
6747 	} else if (defer_t == SDEB_DEFER_WQ) {
6748 		/* Cancel if pending */
6749 		if (cancel_work(&sd_dp->ew.work))
6750 			return true;
6751 		/* callback may be running, so return false */
6752 		return false;
6753 	} else if (defer_t == SDEB_DEFER_POLL) {
6754 		return true;
6755 	}
6756 
6757 	return false;
6758 }
6759 
6760 /*
6761  * Called from scsi_debug_abort() only, which is for timed-out cmd.
6762  */
6763 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6764 {
6765 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6766 	unsigned long flags;
6767 	bool res;
6768 
6769 	spin_lock_irqsave(&sdsc->lock, flags);
6770 	res = scsi_debug_stop_cmnd(cmnd);
6771 	spin_unlock_irqrestore(&sdsc->lock, flags);
6772 
6773 	return res;
6774 }
6775 
6776 /*
6777  * All we can do is set the cmnd as internally aborted and wait for it to
6778  * finish. We cannot call scsi_done() as the normal completion path may do that.
6779  */
6780 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6781 {
6782 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6783 
6784 	return true;
6785 }
6786 
6787 /* Deletes (stops) timers or work queues of all queued commands */
6788 static void stop_all_queued(void)
6789 {
6790 	struct sdebug_host_info *sdhp;
6791 
6792 	mutex_lock(&sdebug_host_list_mutex);
6793 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6794 		struct Scsi_Host *shost = sdhp->shost;
6795 
6796 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6797 	}
6798 	mutex_unlock(&sdebug_host_list_mutex);
6799 }
6800 
6801 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6802 {
6803 	struct scsi_device *sdp = cmnd->device;
6804 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6805 	struct sdebug_err_inject *err;
6806 	unsigned char *cmd = cmnd->cmnd;
6807 	int ret = 0;
6808 
6809 	if (devip == NULL)
6810 		return 0;
6811 
6812 	rcu_read_lock();
6813 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6814 		if (err->type == ERR_ABORT_CMD_FAILED &&
6815 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6816 			ret = !!err->cnt;
6817 			if (err->cnt < 0)
6818 				err->cnt++;
6819 
6820 			rcu_read_unlock();
6821 			return ret;
6822 		}
6823 	}
6824 	rcu_read_unlock();
6825 
6826 	return 0;
6827 }
6828 
6829 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6830 {
6831 	bool aborted = scsi_debug_abort_cmnd(SCpnt);
6832 	u8 *cmd = SCpnt->cmnd;
6833 	u8 opcode = cmd[0];
6834 
6835 	++num_aborts;
6836 
6837 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6838 		sdev_printk(KERN_INFO, SCpnt->device,
6839 			    "%s: command%s found\n", __func__,
6840 			    aborted ? "" : " not");
6841 
6842 
6843 	if (sdebug_fail_abort(SCpnt)) {
6844 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6845 			    opcode);
6846 		return FAILED;
6847 	}
6848 
6849 	if (!aborted)
6850 		return FAILED;
6851 
6852 	return SUCCESS;
6853 }
6854 
6855 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6856 {
6857 	struct scsi_device *sdp = data;
6858 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6859 
6860 	if (scmd->device == sdp)
6861 		scsi_debug_abort_cmnd(scmd);
6862 
6863 	return true;
6864 }
6865 
6866 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6867 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6868 {
6869 	struct Scsi_Host *shost = sdp->host;
6870 
6871 	blk_mq_tagset_busy_iter(&shost->tag_set,
6872 				scsi_debug_stop_all_queued_iter, sdp);
6873 }
6874 
6875 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6876 {
6877 	struct scsi_device *sdp = cmnd->device;
6878 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6879 	struct sdebug_err_inject *err;
6880 	unsigned char *cmd = cmnd->cmnd;
6881 	int ret = 0;
6882 
6883 	if (devip == NULL)
6884 		return 0;
6885 
6886 	rcu_read_lock();
6887 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6888 		if (err->type == ERR_LUN_RESET_FAILED &&
6889 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6890 			ret = !!err->cnt;
6891 			if (err->cnt < 0)
6892 				err->cnt++;
6893 
6894 			rcu_read_unlock();
6895 			return ret;
6896 		}
6897 	}
6898 	rcu_read_unlock();
6899 
6900 	return 0;
6901 }
6902 
6903 static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6904 {
6905 	int i;
6906 
6907 	devip->tape_blksize = TAPE_DEF_BLKSIZE;
6908 	devip->tape_density = TAPE_DEF_DENSITY;
6909 	devip->tape_partition = 0;
6910 	devip->tape_dce = 0;
6911 	for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6912 		devip->tape_location[i] = 0;
6913 	devip->tape_pending_nbr_partitions = -1;
6914 	/* Don't reset partitioning? */
6915 }
6916 
6917 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6918 {
6919 	struct scsi_device *sdp = SCpnt->device;
6920 	struct sdebug_dev_info *devip = sdp->hostdata;
6921 	u8 *cmd = SCpnt->cmnd;
6922 	u8 opcode = cmd[0];
6923 
6924 	++num_dev_resets;
6925 
6926 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6927 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6928 
6929 	scsi_debug_stop_all_queued(sdp);
6930 	if (devip) {
6931 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
6932 		if (SCpnt->device->type == TYPE_TAPE)
6933 			scsi_tape_reset_clear(devip);
6934 	}
6935 
6936 	if (sdebug_fail_lun_reset(SCpnt)) {
6937 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6938 		return FAILED;
6939 	}
6940 
6941 	return SUCCESS;
6942 }
6943 
6944 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6945 {
6946 	struct scsi_target *starget = scsi_target(cmnd->device);
6947 	struct sdebug_target_info *targetip =
6948 		(struct sdebug_target_info *)starget->hostdata;
6949 
6950 	if (targetip)
6951 		return targetip->reset_fail;
6952 
6953 	return 0;
6954 }
6955 
6956 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6957 {
6958 	struct scsi_device *sdp = SCpnt->device;
6959 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6960 	struct sdebug_dev_info *devip;
6961 	u8 *cmd = SCpnt->cmnd;
6962 	u8 opcode = cmd[0];
6963 	int k = 0;
6964 
6965 	++num_target_resets;
6966 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6967 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6968 
6969 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6970 		if (devip->target == sdp->id) {
6971 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6972 			if (SCpnt->device->type == TYPE_TAPE)
6973 				scsi_tape_reset_clear(devip);
6974 			++k;
6975 		}
6976 	}
6977 
6978 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6979 		sdev_printk(KERN_INFO, sdp,
6980 			    "%s: %d device(s) found in target\n", __func__, k);
6981 
6982 	if (sdebug_fail_target_reset(SCpnt)) {
6983 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6984 			    opcode);
6985 		return FAILED;
6986 	}
6987 
6988 	return SUCCESS;
6989 }
6990 
6991 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6992 {
6993 	struct scsi_device *sdp = SCpnt->device;
6994 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6995 	struct sdebug_dev_info *devip;
6996 	int k = 0;
6997 
6998 	++num_bus_resets;
6999 
7000 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7001 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
7002 
7003 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
7004 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7005 		if (SCpnt->device->type == TYPE_TAPE)
7006 			scsi_tape_reset_clear(devip);
7007 		++k;
7008 	}
7009 
7010 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7011 		sdev_printk(KERN_INFO, sdp,
7012 			    "%s: %d device(s) found in host\n", __func__, k);
7013 	return SUCCESS;
7014 }
7015 
7016 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
7017 {
7018 	struct sdebug_host_info *sdbg_host;
7019 	struct sdebug_dev_info *devip;
7020 	int k = 0;
7021 
7022 	++num_host_resets;
7023 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7024 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
7025 	mutex_lock(&sdebug_host_list_mutex);
7026 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
7027 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
7028 				    dev_list) {
7029 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7030 			if (SCpnt->device->type == TYPE_TAPE)
7031 				scsi_tape_reset_clear(devip);
7032 			++k;
7033 		}
7034 	}
7035 	mutex_unlock(&sdebug_host_list_mutex);
7036 	stop_all_queued();
7037 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7038 		sdev_printk(KERN_INFO, SCpnt->device,
7039 			    "%s: %d device(s) found\n", __func__, k);
7040 	return SUCCESS;
7041 }
7042 
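/* Build a minimal MBR in the first block of the ram store: the 0x55AA
 * signature at offset 510 and up to SDEBUG_MAX_PARTS primary partition
 * entries at offset 0x1be, using CHS values derived from the fake geometry.
 */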
7043 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
7044 {
7045 	struct msdos_partition *pp;
7046 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
7047 	int sectors_per_part, num_sectors, k;
7048 	int heads_by_sects, start_sec, end_sec;
7049 
7050 	/* assume partition table already zeroed */
7051 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
7052 		return;
7053 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
7054 		sdebug_num_parts = SDEBUG_MAX_PARTS;
7055 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
7056 	}
7057 	num_sectors = (int)get_sdebug_capacity();
7058 	sectors_per_part = (num_sectors - sdebug_sectors_per)
7059 			   / sdebug_num_parts;
7060 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
7061 	starts[0] = sdebug_sectors_per;
7062 	max_part_secs = sectors_per_part;
7063 	for (k = 1; k < sdebug_num_parts; ++k) {
7064 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
7065 			    * heads_by_sects;
7066 		if (starts[k] - starts[k - 1] < max_part_secs)
7067 			max_part_secs = starts[k] - starts[k - 1];
7068 	}
7069 	starts[sdebug_num_parts] = num_sectors;
7070 	starts[sdebug_num_parts + 1] = 0;
7071 
7072 	ramp[510] = 0x55;	/* magic partition markings */
7073 	ramp[511] = 0xAA;
7074 	pp = (struct msdos_partition *)(ramp + 0x1be);
7075 	for (k = 0; starts[k + 1]; ++k, ++pp) {
7076 		start_sec = starts[k];
7077 		end_sec = starts[k] + max_part_secs - 1;
7078 		pp->boot_ind = 0;
7079 
7080 		pp->cyl = start_sec / heads_by_sects;
7081 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
7082 			   / sdebug_sectors_per;
7083 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
7084 
7085 		pp->end_cyl = end_sec / heads_by_sects;
7086 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
7087 			       / sdebug_sectors_per;
7088 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
7089 
7090 		pp->start_sect = cpu_to_le32(start_sec);
7091 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
7092 		pp->sys_ind = 0x83;	/* plain Linux partition */
7093 	}
7094 }
7095 
7096 static void block_unblock_all_queues(bool block)
7097 {
7098 	struct sdebug_host_info *sdhp;
7099 
7100 	lockdep_assert_held(&sdebug_host_list_mutex);
7101 
7102 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7103 		struct Scsi_Host *shost = sdhp->shost;
7104 
7105 		if (block)
7106 			scsi_block_requests(shost);
7107 		else
7108 			scsi_unblock_requests(shost);
7109 	}
7110 }
7111 
7112 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7113  * commands will be processed normally before triggers occur.
7114  */
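/* Example: with every_nth=100, a cmnd_count of 537 is rounded down to 500,
 * so roughly 99 further commands are processed normally before the next
 * trigger fires. */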
7115 static void tweak_cmnd_count(void)
7116 {
7117 	int count, modulo;
7118 
7119 	modulo = abs(sdebug_every_nth);
7120 	if (modulo < 2)
7121 		return;
7122 
7123 	mutex_lock(&sdebug_host_list_mutex);
7124 	block_unblock_all_queues(true);
7125 	count = atomic_read(&sdebug_cmnd_count);
7126 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7127 	block_unblock_all_queues(false);
7128 	mutex_unlock(&sdebug_host_list_mutex);
7129 }
7130 
7131 static void clear_queue_stats(void)
7132 {
7133 	atomic_set(&sdebug_cmnd_count, 0);
7134 	atomic_set(&sdebug_completions, 0);
7135 	atomic_set(&sdebug_miss_cpus, 0);
7136 	atomic_set(&sdebug_a_tsf, 0);
7137 }
7138 
7139 static bool inject_on_this_cmd(void)
7140 {
7141 	if (sdebug_every_nth == 0)
7142 		return false;
7143 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7144 }
7145 
7146 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
7147 
7148 /* Complete the processing of the thread that queued a SCSI command to this
7149  * driver. It either completes the command by calling scsi_done() or
7150  * schedules an hrtimer or work-queue deferral and then returns 0. Returns
7151  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
7152  */
7153 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
7154 			 int scsi_result,
7155 			 int (*pfp)(struct scsi_cmnd *,
7156 				    struct sdebug_dev_info *),
7157 			 int delta_jiff, int ndelay)
7158 {
7159 	struct request *rq = scsi_cmd_to_rq(cmnd);
7160 	bool polled = rq->cmd_flags & REQ_POLLED;
7161 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
7162 	unsigned long flags;
7163 	u64 ns_from_boot = 0;
7164 	struct scsi_device *sdp;
7165 	struct sdebug_defer *sd_dp;
7166 
7167 	if (unlikely(devip == NULL)) {
7168 		if (scsi_result == 0)
7169 			scsi_result = DID_NO_CONNECT << 16;
7170 		goto respond_in_thread;
7171 	}
7172 	sdp = cmnd->device;
7173 
7174 	if (delta_jiff == 0)
7175 		goto respond_in_thread;
7176 
7177 
7178 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
7179 		     (scsi_result == 0))) {
7180 		int num_in_q = scsi_device_busy(sdp);
7181 		int qdepth = cmnd->device->queue_depth;
7182 
7183 		if ((num_in_q == qdepth) &&
7184 		    (atomic_inc_return(&sdebug_a_tsf) >=
7185 		     abs(sdebug_every_nth))) {
7186 			atomic_set(&sdebug_a_tsf, 0);
7187 			scsi_result = device_qfull_result;
7188 
7189 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
7190 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
7191 					    __func__, num_in_q);
7192 		}
7193 	}
7194 
7195 	sd_dp = &sdsc->sd_dp;
7196 
7197 	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
7198 		ns_from_boot = ktime_get_boottime_ns();
7199 
7200 	/* one of the resp_*() response functions is called here */
7201 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
7202 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
7203 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
7204 		delta_jiff = ndelay = 0;
7205 	}
7206 	if (cmnd->result == 0 && scsi_result != 0)
7207 		cmnd->result = scsi_result;
7208 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
7209 		if (atomic_read(&sdeb_inject_pending)) {
7210 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
7211 			atomic_set(&sdeb_inject_pending, 0);
7212 			cmnd->result = check_condition_result;
7213 		}
7214 	}
7215 
7216 	if (unlikely(sdebug_verbose && cmnd->result))
7217 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
7218 			    __func__, cmnd->result);
7219 
7220 	if (delta_jiff > 0 || ndelay > 0) {
7221 		ktime_t kt;
7222 
7223 		if (delta_jiff > 0) {
7224 			u64 ns = jiffies_to_nsecs(delta_jiff);
7225 
7226 			if (sdebug_random && ns < U32_MAX) {
7227 				ns = get_random_u32_below((u32)ns);
7228 			} else if (sdebug_random) {
7229 				ns >>= 12;	/* scale to 4 usec precision */
7230 				if (ns < U32_MAX)	/* over 4 hours max */
7231 					ns = get_random_u32_below((u32)ns);
7232 				ns <<= 12;
7233 			}
7234 			kt = ns_to_ktime(ns);
7235 		} else {	/* ndelay has a 4.2 second max */
7236 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
7237 					     (u32)ndelay;
7238 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
7239 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
7240 
7241 				if (kt <= d) {	/* elapsed duration >= kt */
7242 					/* call scsi_done() from this thread */
7243 					scsi_done(cmnd);
7244 					return 0;
7245 				}
7246 				/* otherwise reduce kt by elapsed time */
7247 				kt -= d;
7248 			}
7249 		}
7250 		if (sdebug_statistics)
7251 			sd_dp->issuing_cpu = raw_smp_processor_id();
7252 		if (polled) {
7253 			spin_lock_irqsave(&sdsc->lock, flags);
7254 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
7255 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7256 			spin_unlock_irqrestore(&sdsc->lock, flags);
7257 		} else {
7258 			/* schedule the invocation of scsi_done() for a later time */
7259 			spin_lock_irqsave(&sdsc->lock, flags);
7260 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
7261 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
7262 			/*
7263 			 * The completion handler will try to grab sqcp->lock,
7264 			 * so there is no chance that the completion handler
7265 			 * will call scsi_done() until we release the lock
7266 			 * here (so ok to keep referencing sdsc).
7267 			 */
7268 			spin_unlock_irqrestore(&sdsc->lock, flags);
7269 		}
7270 	} else {	/* jdelay < 0, use work queue */
7271 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
7272 			     atomic_read(&sdeb_inject_pending))) {
7273 			sd_dp->aborted = true;
7274 			atomic_set(&sdeb_inject_pending, 0);
7275 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
7276 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
7277 		}
7278 
7279 		if (sdebug_statistics)
7280 			sd_dp->issuing_cpu = raw_smp_processor_id();
7281 		if (polled) {
7282 			spin_lock_irqsave(&sdsc->lock, flags);
7283 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
7284 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7285 			spin_unlock_irqrestore(&sdsc->lock, flags);
7286 		} else {
7287 			spin_lock_irqsave(&sdsc->lock, flags);
7288 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
7289 			schedule_work(&sd_dp->ew.work);
7290 			spin_unlock_irqrestore(&sdsc->lock, flags);
7291 		}
7292 	}
7293 
7294 	return 0;
7295 
7296 respond_in_thread:	/* call back to mid-layer using invocation thread */
7297 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
7298 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
7299 	if (cmnd->result == 0 && scsi_result != 0)
7300 		cmnd->result = scsi_result;
7301 	scsi_done(cmnd);
7302 	return 0;
7303 }
7304 
7305 /* Note: The following macros create attribute files in the
7306    /sys/module/scsi_debug/parameters directory. Unfortunately this
7307    driver is unaware of a change and cannot trigger auxiliary actions
7308    as it can when the corresponding attribute in the
7309    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
7310  */
7311 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
7312 module_param_named(ato, sdebug_ato, int, S_IRUGO);
7313 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
7314 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
7315 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
7316 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
7317 module_param_named(dif, sdebug_dif, int, S_IRUGO);
7318 module_param_named(dix, sdebug_dix, int, S_IRUGO);
7319 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
7320 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
7321 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
7322 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
7323 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
7324 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
7325 module_param_string(inq_product, sdebug_inq_product_id,
7326 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
7327 module_param_string(inq_rev, sdebug_inq_product_rev,
7328 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
7329 module_param_string(inq_vendor, sdebug_inq_vendor_id,
7330 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
7331 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
7332 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
7333 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
7334 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
7335 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
7336 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
7337 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
7338 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
7339 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
7340 module_param_named(medium_error_count, sdebug_medium_error_count, int,
7341 		   S_IRUGO | S_IWUSR);
7342 module_param_named(medium_error_start, sdebug_medium_error_start, int,
7343 		   S_IRUGO | S_IWUSR);
7344 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
7345 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
7346 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
7347 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
7348 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
7349 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
7350 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
7351 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
7352 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
7353 module_param_named(per_host_store, sdebug_per_host_store, bool,
7354 		   S_IRUGO | S_IWUSR);
7355 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
7356 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
7357 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
7358 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
7359 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
7360 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
7361 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
7362 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
7363 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
7364 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
7365 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
7366 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
7367 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
7368 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
7369 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
7370 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
7371 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
7372 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
7373 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
7374 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
7375 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
7376 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
7377 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
7378 		   S_IRUGO | S_IWUSR);
7379 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
7380 module_param_named(write_same_length, sdebug_write_same_length, int,
7381 		   S_IRUGO | S_IWUSR);
7382 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
7383 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
7384 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
7385 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
7386 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
7387 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
7388 
7389 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
7390 MODULE_DESCRIPTION("SCSI debug adapter driver");
7391 MODULE_LICENSE("GPL");
7392 MODULE_VERSION(SDEBUG_VERSION);
7393 
7394 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
7395 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
7396 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
7397 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
7398 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7399 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
7400 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7401 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
7402 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
7403 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
7404 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
7405 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
7406 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
7407 MODULE_PARM_DESC(host_max_queue,
7408 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
7409 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
7410 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
7411 		 SDEBUG_VERSION "\")");
7412 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
7413 MODULE_PARM_DESC(lbprz,
7414 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
7415 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
7416 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
7417 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
7418 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
7419 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
7420 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
7421 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
7422 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
7423 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow-on MEDIUM error");
7424 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
7425 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7426 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7427 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
7428 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
7429 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
7430 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
7431 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7432 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7433 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7434 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7435 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7436 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
7437 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7438 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7439 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7440 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7441 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7442 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7443 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7444 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7445 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7446 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7447 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7448 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7449 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7450 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7451 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7452 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7453 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7454 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7455 MODULE_PARM_DESC(uuid_ctl,
7456 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7457 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7458 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7459 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7460 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7461 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7462 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7463 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7464 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7465 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7466 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
7467 
7468 #define SDEBUG_INFO_LEN 256
7469 static char sdebug_info[SDEBUG_INFO_LEN];
7470 
7471 static const char *scsi_debug_info(struct Scsi_Host *shp)
7472 {
7473 	int k;
7474 
7475 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7476 		      my_name, SDEBUG_VERSION, sdebug_version_date);
7477 	if (k >= (SDEBUG_INFO_LEN - 1))
7478 		return sdebug_info;
7479 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7480 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7481 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
7482 		  "statistics", (int)sdebug_statistics);
7483 	return sdebug_info;
7484 }
7485 
7486 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7487 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7488 				 int length)
7489 {
7490 	char arr[16];
7491 	int opts;
7492 	int minLen = length > 15 ? 15 : length;
7493 
7494 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7495 		return -EACCES;
7496 	memcpy(arr, buffer, minLen);
7497 	arr[minLen] = '\0';
7498 	if (1 != sscanf(arr, "%d", &opts))
7499 		return -EINVAL;
7500 	sdebug_opts = opts;
7501 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7502 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7503 	if (sdebug_every_nth != 0)
7504 		tweak_cmnd_count();
7505 	return length;
7506 }
7507 
7508 struct sdebug_submit_queue_data {
7509 	int *first;
7510 	int *last;
7511 	int queue_num;
7512 };
7513 
7514 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7515 {
7516 	struct sdebug_submit_queue_data *data = opaque;
7517 	u32 unique_tag = blk_mq_unique_tag(rq);
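	/* The unique tag packs the hw queue index into the upper 16 bits and
	 * the per-queue tag into the lower 16 bits. */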
7518 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7519 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7520 	int queue_num = data->queue_num;
7521 
7522 	if (hwq != queue_num)
7523 		return true;
7524 
7525 	/* Rely on iter'ing in ascending tag order */
7526 	if (*data->first == -1)
7527 		*data->first = *data->last = tag;
7528 	else
7529 		*data->last = tag;
7530 
7531 	return true;
7532 }
7533 
7534 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
7535  * same for each scsi_debug host (if more than one). Some of the counters
7536  * in the output are not atomic, so they may be inaccurate on a busy system. */
7537 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
7538 {
7539 	struct sdebug_host_info *sdhp;
7540 	int j;
7541 
7542 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
7543 		   SDEBUG_VERSION, sdebug_version_date);
7544 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
7545 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
7546 		   sdebug_opts, sdebug_every_nth);
7547 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
7548 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
7549 		   sdebug_sector_size, "bytes");
7550 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
7551 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
7552 		   num_aborts);
7553 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
7554 		   num_dev_resets, num_target_resets, num_bus_resets,
7555 		   num_host_resets);
7556 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
7557 		   dix_reads, dix_writes, dif_errors);
7558 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
7559 		   sdebug_statistics);
7560 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
7561 		   atomic_read(&sdebug_cmnd_count),
7562 		   atomic_read(&sdebug_completions),
7563 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
7564 		   atomic_read(&sdebug_a_tsf),
7565 		   atomic_read(&sdeb_mq_poll_count));
7566 
7567 	seq_printf(m, "submit_queues=%d\n", submit_queues);
7568 	for (j = 0; j < submit_queues; ++j) {
7569 		int f = -1, l = -1;
7570 		struct sdebug_submit_queue_data data = {
7571 			.queue_num = j,
7572 			.first = &f,
7573 			.last = &l,
7574 		};
7575 		seq_printf(m, "  queue %d:\n", j);
7576 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
7577 					&data);
7578 		if (f >= 0) {
7579 			seq_printf(m, "    BUSY: %s: %d,%d\n",
7580 				   "first,last bits", f, l);
7581 		}
7582 	}
7583 
7584 	seq_printf(m, "this host_no=%d\n", host->host_no);
7585 	if (!xa_empty(per_store_ap)) {
7586 		bool niu;
7587 		int idx;
7588 		unsigned long l_idx;
7589 		struct sdeb_store_info *sip;
7590 
7591 		seq_puts(m, "\nhost list:\n");
7592 		j = 0;
7593 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7594 			idx = sdhp->si_idx;
7595 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
7596 				   sdhp->shost->host_no, idx);
7597 			++j;
7598 		}
7599 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
7600 			   sdeb_most_recent_idx);
7601 		j = 0;
7602 		xa_for_each(per_store_ap, l_idx, sip) {
7603 			niu = xa_get_mark(per_store_ap, l_idx,
7604 					  SDEB_XA_NOT_IN_USE);
7605 			idx = (int)l_idx;
7606 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
7607 				   (niu ? "  not_in_use" : ""));
7608 			++j;
7609 		}
7610 	}
7611 	return 0;
7612 }
7613 
7614 static ssize_t delay_show(struct device_driver *ddp, char *buf)
7615 {
7616 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
7617 }
7618 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
7619  * of delay is jiffies.
7620  */
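/* Usage sketch (path per the module_param note above):
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * requests a 2 jiffy response delay on subsequent commands. */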
7621 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
7622 			   size_t count)
7623 {
7624 	int jdelay, res;
7625 
7626 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
7627 		res = count;
7628 		if (sdebug_jdelay != jdelay) {
7629 			struct sdebug_host_info *sdhp;
7630 
7631 			mutex_lock(&sdebug_host_list_mutex);
7632 			block_unblock_all_queues(true);
7633 
7634 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7635 				struct Scsi_Host *shost = sdhp->shost;
7636 
7637 				if (scsi_host_busy(shost)) {
7638 					res = -EBUSY;   /* queued commands */
7639 					break;
7640 				}
7641 			}
7642 			if (res > 0) {
7643 				sdebug_jdelay = jdelay;
7644 				sdebug_ndelay = 0;
7645 			}
7646 			block_unblock_all_queues(false);
7647 			mutex_unlock(&sdebug_host_list_mutex);
7648 		}
7649 		return res;
7650 	}
7651 	return -EINVAL;
7652 }
7653 static DRIVER_ATTR_RW(delay);
7654 
7655 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
7656 {
7657 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
7658 }
7659 /* Returns -EBUSY if ndelay is being changed and commands are queued */
7660 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
7661 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
7662 			    size_t count)
7663 {
7664 	int ndelay, res;
7665 
7666 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
7667 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
7668 		res = count;
7669 		if (sdebug_ndelay != ndelay) {
7670 			struct sdebug_host_info *sdhp;
7671 
7672 			mutex_lock(&sdebug_host_list_mutex);
7673 			block_unblock_all_queues(true);
7674 
7675 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7676 				struct Scsi_Host *shost = sdhp->shost;
7677 
7678 				if (scsi_host_busy(shost)) {
7679 					res = -EBUSY;   /* queued commands */
7680 					break;
7681 				}
7682 			}
7683 
7684 			if (res > 0) {
7685 				sdebug_ndelay = ndelay;
7686 				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
7687 							: DEF_JDELAY;
7688 			}
7689 			block_unblock_all_queues(false);
7690 			mutex_unlock(&sdebug_host_list_mutex);
7691 		}
7692 		return res;
7693 	}
7694 	return -EINVAL;
7695 }
7696 static DRIVER_ATTR_RW(ndelay);
7697 
7698 static ssize_t opts_show(struct device_driver *ddp, char *buf)
7699 {
7700 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7701 }
7702 
7703 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7704 			  size_t count)
7705 {
7706 	int opts;
7707 	char work[20];
7708 
7709 	if (sscanf(buf, "%10s", work) == 1) {
7710 		if (strncasecmp(work, "0x", 2) == 0) {
7711 			if (kstrtoint(work + 2, 16, &opts) == 0)
7712 				goto opts_done;
7713 		} else {
7714 			if (kstrtoint(work, 10, &opts) == 0)
7715 				goto opts_done;
7716 		}
7717 	}
7718 	return -EINVAL;
7719 opts_done:
7720 	sdebug_opts = opts;
7721 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7722 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7723 	tweak_cmnd_count();
7724 	return count;
7725 }
7726 static DRIVER_ATTR_RW(opts);
7727 
7728 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7729 {
7730 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7731 }
7732 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7733 			   size_t count)
7734 {
7735 	int n;
7736 
7737 	/* Cannot change from or to TYPE_ZBC with sysfs */
7738 	if (sdebug_ptype == TYPE_ZBC)
7739 		return -EINVAL;
7740 
7741 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7742 		if (n == TYPE_ZBC)
7743 			return -EINVAL;
7744 		sdebug_ptype = n;
7745 		return count;
7746 	}
7747 	return -EINVAL;
7748 }
7749 static DRIVER_ATTR_RW(ptype);
7750 
7751 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7752 {
7753 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7754 }
7755 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7756 			    size_t count)
7757 {
7758 	int n;
7759 
7760 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7761 		sdebug_dsense = n;
7762 		return count;
7763 	}
7764 	return -EINVAL;
7765 }
7766 static DRIVER_ATTR_RW(dsense);
7767 
7768 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7769 {
7770 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7771 }
7772 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7773 			     size_t count)
7774 {
7775 	int n, idx;
7776 
7777 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7778 		bool want_store = (n == 0);
7779 		struct sdebug_host_info *sdhp;
7780 
7781 		n = (n > 0);
7782 		sdebug_fake_rw = (sdebug_fake_rw > 0);
7783 		if (sdebug_fake_rw == n)
7784 			return count;	/* not transitioning so do nothing */
7785 
7786 		if (want_store) {	/* 1 --> 0 transition, set up store */
7787 			if (sdeb_first_idx < 0) {
7788 				idx = sdebug_add_store();
7789 				if (idx < 0)
7790 					return idx;
7791 			} else {
7792 				idx = sdeb_first_idx;
7793 				xa_clear_mark(per_store_ap, idx,
7794 					      SDEB_XA_NOT_IN_USE);
7795 			}
7796 			/* make all hosts use same store */
7797 			list_for_each_entry(sdhp, &sdebug_host_list,
7798 					    host_list) {
7799 				if (sdhp->si_idx != idx) {
7800 					xa_set_mark(per_store_ap, sdhp->si_idx,
7801 						    SDEB_XA_NOT_IN_USE);
7802 					sdhp->si_idx = idx;
7803 				}
7804 			}
7805 			sdeb_most_recent_idx = idx;
7806 		} else {	/* 0 --> 1 transition is trigger for shrink */
7807 			sdebug_erase_all_stores(true /* apart from first */);
7808 		}
7809 		sdebug_fake_rw = n;
7810 		return count;
7811 	}
7812 	return -EINVAL;
7813 }
7814 static DRIVER_ATTR_RW(fake_rw);
7815 
7816 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7817 {
7818 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7819 }
7820 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7821 			      size_t count)
7822 {
7823 	int n;
7824 
7825 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7826 		sdebug_no_lun_0 = n;
7827 		return count;
7828 	}
7829 	return -EINVAL;
7830 }
7831 static DRIVER_ATTR_RW(no_lun_0);
7832 
7833 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7834 {
7835 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7836 }
7837 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7838 			      size_t count)
7839 {
7840 	int n;
7841 
7842 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7843 		sdebug_num_tgts = n;
7844 		sdebug_max_tgts_luns();
7845 		return count;
7846 	}
7847 	return -EINVAL;
7848 }
7849 static DRIVER_ATTR_RW(num_tgts);
7850 
7851 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7852 {
7853 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7854 }
7855 static DRIVER_ATTR_RO(dev_size_mb);
7856 
7857 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7858 {
7859 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7860 }
7861 
7862 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7863 				    size_t count)
7864 {
7865 	bool v;
7866 
7867 	if (kstrtobool(buf, &v))
7868 		return -EINVAL;
7869 
7870 	sdebug_per_host_store = v;
7871 	return count;
7872 }
7873 static DRIVER_ATTR_RW(per_host_store);
7874 
7875 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7876 {
7877 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7878 }
7879 static DRIVER_ATTR_RO(num_parts);
7880 
7881 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7882 {
7883 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7884 }
7885 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7886 			       size_t count)
7887 {
7888 	int nth;
7889 	char work[20];
7890 
7891 	if (sscanf(buf, "%10s", work) == 1) {
7892 		if (strncasecmp(work, "0x", 2) == 0) {
7893 			if (kstrtoint(work + 2, 16, &nth) == 0)
7894 				goto every_nth_done;
7895 		} else {
7896 			if (kstrtoint(work, 10, &nth) == 0)
7897 				goto every_nth_done;
7898 		}
7899 	}
7900 	return -EINVAL;
7901 
7902 every_nth_done:
7903 	sdebug_every_nth = nth;
7904 	if (nth && !sdebug_statistics) {
7905 		pr_info("every_nth needs statistics=1, set it\n");
7906 		sdebug_statistics = true;
7907 	}
7908 	tweak_cmnd_count();
7909 	return count;
7910 }
7911 static DRIVER_ATTR_RW(every_nth);
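/*
 * Per the parsing above, every_nth accepts decimal or "0x"-prefixed hex;
 * an illustrative pair (both set the same value):
 *
 *   echo 100 > every_nth	trigger on every 100th command
 *   echo 0x64 > every_nth	same value, given in hex
 *
 * Writing a non-zero value force-enables statistics, as noted above.
 */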
7912 
7913 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7914 {
7915 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7916 }
7917 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7918 				size_t count)
7919 {
7920 	int n;
7921 	bool changed;
7922 
7923 	if (kstrtoint(buf, 0, &n))
7924 		return -EINVAL;
7925 	if (n >= 0) {
7926 		if (n > (int)SAM_LUN_AM_FLAT) {
7927 			pr_warn("only LUN address methods 0 and 1 are supported\n");
7928 			return -EINVAL;
7929 		}
7930 		changed = ((int)sdebug_lun_am != n);
7931 		sdebug_lun_am = n;
7932 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
7933 			struct sdebug_host_info *sdhp;
7934 			struct sdebug_dev_info *dp;
7935 
7936 			mutex_lock(&sdebug_host_list_mutex);
7937 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7938 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7939 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7940 				}
7941 			}
7942 			mutex_unlock(&sdebug_host_list_mutex);
7943 		}
7944 		return count;
7945 	}
7946 	return -EINVAL;
7947 }
7948 static DRIVER_ATTR_RW(lun_format);
7949 
7950 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7951 {
7952 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7953 }
7954 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7955 			      size_t count)
7956 {
7957 	int n;
7958 	bool changed;
7959 
7960 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7961 		if (n > 256) {
7962 			pr_warn("max_luns can be no more than 256\n");
7963 			return -EINVAL;
7964 		}
7965 		changed = (sdebug_max_luns != n);
7966 		sdebug_max_luns = n;
7967 		sdebug_max_tgts_luns();
7968 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
7969 			struct sdebug_host_info *sdhp;
7970 			struct sdebug_dev_info *dp;
7971 
7972 			mutex_lock(&sdebug_host_list_mutex);
7973 			list_for_each_entry(sdhp, &sdebug_host_list,
7974 					    host_list) {
7975 				list_for_each_entry(dp, &sdhp->dev_info_list,
7976 						    dev_list) {
7977 					set_bit(SDEBUG_UA_LUNS_CHANGED,
7978 						dp->uas_bm);
7979 				}
7980 			}
7981 			mutex_unlock(&sdebug_host_list_mutex);
7982 		}
7983 		return count;
7984 	}
7985 	return -EINVAL;
7986 }
7987 static DRIVER_ATTR_RW(max_luns);
7988 
7989 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7990 {
7991 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7992 }
7993 /* N.B. max_queue can be changed while there are queued commands. In flight
7994  * commands beyond the new max_queue will be completed. */
7995 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7996 			       size_t count)
7997 {
7998 	int n;
7999 
8000 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
8001 	    (n <= SDEBUG_CANQUEUE) &&
8002 	    (sdebug_host_max_queue == 0)) {
8003 		mutex_lock(&sdebug_host_list_mutex);
8004 
8005 		/* We may only change sdebug_max_queue when we have no shosts */
8006 		if (list_empty(&sdebug_host_list))
8007 			sdebug_max_queue = n;
8008 		else
8009 			count = -EBUSY;
8010 		mutex_unlock(&sdebug_host_list_mutex);
8011 		return count;
8012 	}
8013 	return -EINVAL;
8014 }
8015 static DRIVER_ATTR_RW(max_queue);
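/*
 * Illustrative sequence (assumed values): with host_max_queue=0,
 * max_queue may only change while no hosts are present, else -EBUSY:
 *
 *   echo -1 > add_host	remove the last remaining host
 *   echo 64 > max_queue	now permitted
 *   echo 1 > add_host	re-add a host using the new depth
 */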
8016 
8017 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
8018 {
8019 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
8020 }
8021 
8022 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
8023 {
8024 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
8025 }
8026 
8027 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
8028 {
8029 	bool v;
8030 
8031 	if (kstrtobool(buf, &v))
8032 		return -EINVAL;
8033 
8034 	sdebug_no_rwlock = v;
8035 	return count;
8036 }
8037 static DRIVER_ATTR_RW(no_rwlock);
8038 
8039 /*
8040  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
8041  * in range [0, sdebug_host_max_queue), we can't change it.
8042  */
8043 static DRIVER_ATTR_RO(host_max_queue);
8044 
8045 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
8046 {
8047 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
8048 }
8049 static DRIVER_ATTR_RO(no_uld);
8050 
8051 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
8052 {
8053 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
8054 }
8055 static DRIVER_ATTR_RO(scsi_level);
8056 
8057 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
8058 {
8059 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
8060 }
8061 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
8062 				size_t count)
8063 {
8064 	int n;
8065 	bool changed;
8066 
8067 	/* Ignore capacity change for ZBC drives for now */
8068 	if (sdeb_zbc_in_use)
8069 		return -ENOTSUPP;
8070 
8071 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8072 		changed = (sdebug_virtual_gb != n);
8073 		sdebug_virtual_gb = n;
8074 		sdebug_capacity = get_sdebug_capacity();
8075 		if (changed) {
8076 			struct sdebug_host_info *sdhp;
8077 			struct sdebug_dev_info *dp;
8078 
8079 			mutex_lock(&sdebug_host_list_mutex);
8080 			list_for_each_entry(sdhp, &sdebug_host_list,
8081 					    host_list) {
8082 				list_for_each_entry(dp, &sdhp->dev_info_list,
8083 						    dev_list) {
8084 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
8085 						dp->uas_bm);
8086 				}
8087 			}
8088 			mutex_unlock(&sdebug_host_list_mutex);
8089 		}
8090 		return count;
8091 	}
8092 	return -EINVAL;
8093 }
8094 static DRIVER_ATTR_RW(virtual_gb);
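/*
 * Note on the loop above: changing virtual_gb recomputes the reported
 * capacity and flags SDEBUG_UA_CAPACITY_CHANGED on every device, so the
 * next command each LU receives should complete with a CHECK CONDITION
 * carrying a "capacity data has changed" unit attention.
 */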
8095 
8096 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
8097 {
8098 	/* absolute number of hosts currently active is what is shown */
8099 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
8100 }
8101 
8102 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
8103 			      size_t count)
8104 {
8105 	bool found;
8106 	unsigned long idx;
8107 	struct sdeb_store_info *sip;
8108 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
8109 	int delta_hosts;
8110 
8111 	if (sscanf(buf, "%d", &delta_hosts) != 1)
8112 		return -EINVAL;
8113 	if (delta_hosts > 0) {
8114 		do {
8115 			found = false;
8116 			if (want_phs) {
8117 				xa_for_each_marked(per_store_ap, idx, sip,
8118 						   SDEB_XA_NOT_IN_USE) {
8119 					sdeb_most_recent_idx = (int)idx;
8120 					found = true;
8121 					break;
8122 				}
8123 				if (found)	/* re-use case */
8124 					sdebug_add_host_helper((int)idx);
8125 				else
8126 					sdebug_do_add_host(true);
8127 			} else {
8128 				sdebug_do_add_host(false);
8129 			}
8130 		} while (--delta_hosts);
8131 	} else if (delta_hosts < 0) {
8132 		do {
8133 			sdebug_do_remove_host(false);
8134 		} while (++delta_hosts);
8135 	}
8136 	return count;
8137 }
8138 static DRIVER_ATTR_RW(add_host);
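/*
 * delta_hosts is signed; an illustrative pair (values assumed):
 *
 *   echo 2 > add_host	add two hosts, re-using stores marked
 *			SDEB_XA_NOT_IN_USE when per_host_store is
 *			set and fake_rw is 0
 *   echo -1 > add_host	remove the most recently added host
 */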
8139 
8140 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8141 {
8142 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8143 }
8144 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8145 				    size_t count)
8146 {
8147 	int n;
8148 
8149 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8150 		sdebug_vpd_use_hostno = n;
8151 		return count;
8152 	}
8153 	return -EINVAL;
8154 }
8155 static DRIVER_ATTR_RW(vpd_use_hostno);
8156 
8157 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8158 {
8159 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8160 }
8161 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8162 				size_t count)
8163 {
8164 	int n;
8165 
8166 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8167 		if (n > 0)
8168 			sdebug_statistics = true;
8169 		else {
8170 			clear_queue_stats();
8171 			sdebug_statistics = false;
8172 		}
8173 		return count;
8174 	}
8175 	return -EINVAL;
8176 }
8177 static DRIVER_ATTR_RW(statistics);
8178 
8179 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
8180 {
8181 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
8182 }
8183 static DRIVER_ATTR_RO(sector_size);
8184 
8185 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
8186 {
8187 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
8188 }
8189 static DRIVER_ATTR_RO(submit_queues);
8190 
8191 static ssize_t dix_show(struct device_driver *ddp, char *buf)
8192 {
8193 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
8194 }
8195 static DRIVER_ATTR_RO(dix);
8196 
8197 static ssize_t dif_show(struct device_driver *ddp, char *buf)
8198 {
8199 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
8200 }
8201 static DRIVER_ATTR_RO(dif);
8202 
8203 static ssize_t guard_show(struct device_driver *ddp, char *buf)
8204 {
8205 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
8206 }
8207 static DRIVER_ATTR_RO(guard);
8208 
8209 static ssize_t ato_show(struct device_driver *ddp, char *buf)
8210 {
8211 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
8212 }
8213 static DRIVER_ATTR_RO(ato);
8214 
8215 static ssize_t map_show(struct device_driver *ddp, char *buf)
8216 {
8217 	ssize_t count = 0;
8218 
8219 	if (!scsi_debug_lbp())
8220 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
8221 				 sdebug_store_sectors);
8222 
8223 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
8224 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
8225 
8226 		if (sip)
8227 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
8228 					  (int)map_size, sip->map_storep);
8229 	}
8230 	buf[count++] = '\n';
8231 	buf[count] = '\0';
8232 
8233 	return count;
8234 }
8235 static DRIVER_ATTR_RO(map);
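/*
 * With LBP active, map_show() above emits the provisioning bitmap via
 * the "%*pbl" range-list format; a plausible (assumed) reading:
 *
 *   cat map
 *   0-1,32-47,1024-1535
 *
 * meaning those map indexes (derived from LBAs) are currently mapped.
 */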
8236 
8237 static ssize_t random_show(struct device_driver *ddp, char *buf)
8238 {
8239 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
8240 }
8241 
8242 static ssize_t random_store(struct device_driver *ddp, const char *buf,
8243 			    size_t count)
8244 {
8245 	bool v;
8246 
8247 	if (kstrtobool(buf, &v))
8248 		return -EINVAL;
8249 
8250 	sdebug_random = v;
8251 	return count;
8252 }
8253 static DRIVER_ATTR_RW(random);
8254 
8255 static ssize_t removable_show(struct device_driver *ddp, char *buf)
8256 {
8257 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
8258 }
8259 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
8260 			       size_t count)
8261 {
8262 	int n;
8263 
8264 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8265 		sdebug_removable = (n > 0);
8266 		return count;
8267 	}
8268 	return -EINVAL;
8269 }
8270 static DRIVER_ATTR_RW(removable);
8271 
8272 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
8273 {
8274 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
8275 }
8276 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
8277 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
8278 			       size_t count)
8279 {
8280 	int n;
8281 
8282 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8283 		sdebug_host_lock = (n > 0);
8284 		return count;
8285 	}
8286 	return -EINVAL;
8287 }
8288 static DRIVER_ATTR_RW(host_lock);
8289 
8290 static ssize_t strict_show(struct device_driver *ddp, char *buf)
8291 {
8292 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
8293 }
8294 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
8295 			    size_t count)
8296 {
8297 	int n;
8298 
8299 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8300 		sdebug_strict = (n > 0);
8301 		return count;
8302 	}
8303 	return -EINVAL;
8304 }
8305 static DRIVER_ATTR_RW(strict);
8306 
8307 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
8308 {
8309 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
8310 }
8311 static DRIVER_ATTR_RO(uuid_ctl);
8312 
8313 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
8314 {
8315 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
8316 }
8317 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
8318 			     size_t count)
8319 {
8320 	int ret, n;
8321 
8322 	ret = kstrtoint(buf, 0, &n);
8323 	if (ret)
8324 		return ret;
8325 	sdebug_cdb_len = n;
8326 	all_config_cdb_len();
8327 	return count;
8328 }
8329 static DRIVER_ATTR_RW(cdb_len);
8330 
8331 static const char * const zbc_model_strs_a[] = {
8332 	[BLK_ZONED_NONE] = "none",
8333 	[BLK_ZONED_HA]   = "host-aware",
8334 	[BLK_ZONED_HM]   = "host-managed",
8335 };
8336 
8337 static const char * const zbc_model_strs_b[] = {
8338 	[BLK_ZONED_NONE] = "no",
8339 	[BLK_ZONED_HA]   = "aware",
8340 	[BLK_ZONED_HM]   = "managed",
8341 };
8342 
8343 static const char * const zbc_model_strs_c[] = {
8344 	[BLK_ZONED_NONE] = "0",
8345 	[BLK_ZONED_HA]   = "1",
8346 	[BLK_ZONED_HM]   = "2",
8347 };
8348 
8349 static int sdeb_zbc_model_str(const char *cp)
8350 {
8351 	int res = sysfs_match_string(zbc_model_strs_a, cp);
8352 
8353 	if (res < 0) {
8354 		res = sysfs_match_string(zbc_model_strs_b, cp);
8355 		if (res < 0) {
8356 			res = sysfs_match_string(zbc_model_strs_c, cp);
8357 			if (res < 0)
8358 				return -EINVAL;
8359 		}
8360 	}
8361 	return res;
8362 }
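/*
 * Per the three tables above, any of the alternative spellings is
 * accepted: "host-managed", "managed" and "2" all map to BLK_ZONED_HM;
 * "host-aware", "aware" and "1" to BLK_ZONED_HA; and "none", "no" and
 * "0" to BLK_ZONED_NONE.
 */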
8363 
8364 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
8365 {
8366 	return scnprintf(buf, PAGE_SIZE, "%s\n",
8367 			 zbc_model_strs_a[sdeb_zbc_model]);
8368 }
8369 static DRIVER_ATTR_RO(zbc);
8370 
8371 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
8372 {
8373 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
8374 }
8375 static DRIVER_ATTR_RO(tur_ms_to_ready);
8376 
8377 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
8378 {
8379 	char *p = buf, *end = buf + PAGE_SIZE;
8380 	int i;
8381 
8382 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8383 		p += scnprintf(p, end - p, "%d %ld\n", i,
8384 			       atomic_long_read(&writes_by_group_number[i]));
8385 
8386 	return p - buf;
8387 }
8388 
8389 static ssize_t group_number_stats_store(struct device_driver *ddp,
8390 					const char *buf, size_t count)
8391 {
8392 	int i;
8393 
8394 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8395 		atomic_long_set(&writes_by_group_number[i], 0);
8396 
8397 	return count;
8398 }
8399 static DRIVER_ATTR_RW(group_number_stats);
8400 
8401 /* Note: The following array creates attribute files in the
8402    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
8403    files (over those found in the /sys/module/scsi_debug/parameters
8404    directory) is that auxiliary actions can be triggered when an attribute
8405    is changed. For example see: add_host_store() above.
8406  */
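/*
 * Illustrative contrast (assuming the corresponding module parameter was
 * registered writable): both writes change the same variable, but only
 * the driver attribute also runs sdebug_max_tgts_luns() and related
 * auxiliary actions:
 *
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/max_luns
 *   echo 4 > /sys/module/scsi_debug/parameters/max_luns
 */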
8407 
8408 static struct attribute *sdebug_drv_attrs[] = {
8409 	&driver_attr_delay.attr,
8410 	&driver_attr_opts.attr,
8411 	&driver_attr_ptype.attr,
8412 	&driver_attr_dsense.attr,
8413 	&driver_attr_fake_rw.attr,
8414 	&driver_attr_host_max_queue.attr,
8415 	&driver_attr_no_lun_0.attr,
8416 	&driver_attr_num_tgts.attr,
8417 	&driver_attr_dev_size_mb.attr,
8418 	&driver_attr_num_parts.attr,
8419 	&driver_attr_every_nth.attr,
8420 	&driver_attr_lun_format.attr,
8421 	&driver_attr_max_luns.attr,
8422 	&driver_attr_max_queue.attr,
8423 	&driver_attr_no_rwlock.attr,
8424 	&driver_attr_no_uld.attr,
8425 	&driver_attr_scsi_level.attr,
8426 	&driver_attr_virtual_gb.attr,
8427 	&driver_attr_add_host.attr,
8428 	&driver_attr_per_host_store.attr,
8429 	&driver_attr_vpd_use_hostno.attr,
8430 	&driver_attr_sector_size.attr,
8431 	&driver_attr_statistics.attr,
8432 	&driver_attr_submit_queues.attr,
8433 	&driver_attr_dix.attr,
8434 	&driver_attr_dif.attr,
8435 	&driver_attr_guard.attr,
8436 	&driver_attr_ato.attr,
8437 	&driver_attr_map.attr,
8438 	&driver_attr_random.attr,
8439 	&driver_attr_removable.attr,
8440 	&driver_attr_host_lock.attr,
8441 	&driver_attr_ndelay.attr,
8442 	&driver_attr_strict.attr,
8443 	&driver_attr_uuid_ctl.attr,
8444 	&driver_attr_cdb_len.attr,
8445 	&driver_attr_tur_ms_to_ready.attr,
8446 	&driver_attr_zbc.attr,
8447 	&driver_attr_group_number_stats.attr,
8448 	NULL,
8449 };
8450 ATTRIBUTE_GROUPS(sdebug_drv);
8451 
8452 static struct device *pseudo_primary;
8453 
8454 static int __init scsi_debug_init(void)
8455 {
8456 	bool want_store = (sdebug_fake_rw == 0);
8457 	unsigned long sz;
8458 	int k, ret, hosts_to_add;
8459 	int idx = -1;
8460 
8461 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
8462 		pr_warn("ndelay must be less than 1 second, ignored\n");
8463 		sdebug_ndelay = 0;
8464 	} else if (sdebug_ndelay > 0)
8465 		sdebug_jdelay = JDELAY_OVERRIDDEN;
8466 
8467 	switch (sdebug_sector_size) {
8468 	case  512:
8469 	case 1024:
8470 	case 2048:
8471 	case 4096:
8472 		break;
8473 	default:
8474 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
8475 		return -EINVAL;
8476 	}
8477 
8478 	switch (sdebug_dif) {
8479 	case T10_PI_TYPE0_PROTECTION:
8480 		break;
8481 	case T10_PI_TYPE1_PROTECTION:
8482 	case T10_PI_TYPE2_PROTECTION:
8483 	case T10_PI_TYPE3_PROTECTION:
8484 		have_dif_prot = true;
8485 		break;
8486 
8487 	default:
8488 		pr_err("dif must be 0, 1, 2 or 3\n");
8489 		return -EINVAL;
8490 	}
8491 
8492 	if (sdebug_num_tgts < 0) {
8493 		pr_err("num_tgts must be >= 0\n");
8494 		return -EINVAL;
8495 	}
8496 
8497 	if (sdebug_guard > 1) {
8498 		pr_err("guard must be 0 or 1\n");
8499 		return -EINVAL;
8500 	}
8501 
8502 	if (sdebug_ato > 1) {
8503 		pr_err("ato must be 0 or 1\n");
8504 		return -EINVAL;
8505 	}
8506 
8507 	if (sdebug_physblk_exp > 15) {
8508 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
8509 		return -EINVAL;
8510 	}
8511 
8512 	sdebug_lun_am = sdebug_lun_am_i;
8513 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
8514 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
8515 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
8516 	}
8517 
8518 	if (sdebug_max_luns > 256) {
8519 		if (sdebug_max_luns > 16384) {
8520 			pr_warn("max_luns can be no more than 16384, using default\n");
8521 			sdebug_max_luns = DEF_MAX_LUNS;
8522 		}
8523 		sdebug_lun_am = SAM_LUN_AM_FLAT;
8524 	}
8525 
8526 	if (sdebug_lowest_aligned > 0x3fff) {
8527 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
8528 		return -EINVAL;
8529 	}
8530 
8531 	if (submit_queues < 1) {
8532 		pr_err("submit_queues must be 1 or more\n");
8533 		return -EINVAL;
8534 	}
8535 
8536 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
8537 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
8538 		return -EINVAL;
8539 	}
8540 
8541 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
8542 	    (sdebug_host_max_queue < 0)) {
8543 		pr_err("host_max_queue must be in range [0, %d]\n",
8544 		       SDEBUG_CANQUEUE);
8545 		return -EINVAL;
8546 	}
8547 
8548 	if (sdebug_host_max_queue &&
8549 	    (sdebug_max_queue != sdebug_host_max_queue)) {
8550 		sdebug_max_queue = sdebug_host_max_queue;
8551 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
8552 			sdebug_max_queue);
8553 	}
8554 
8555 	/*
8556 	 * check for host managed zoned block device specified with
8557 	 * ptype=0x14 or zbc=XXX.
8558 	 */
8559 	if (sdebug_ptype == TYPE_ZBC) {
8560 		sdeb_zbc_model = BLK_ZONED_HM;
8561 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
8562 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
8563 		if (k < 0)
8564 			return k;
8565 		sdeb_zbc_model = k;
8566 		switch (sdeb_zbc_model) {
8567 		case BLK_ZONED_NONE:
8568 		case BLK_ZONED_HA:
8569 			sdebug_ptype = TYPE_DISK;
8570 			break;
8571 		case BLK_ZONED_HM:
8572 			sdebug_ptype = TYPE_ZBC;
8573 			break;
8574 		default:
8575 			pr_err("Invalid ZBC model\n");
8576 			return -EINVAL;
8577 		}
8578 	}
8579 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
8580 		sdeb_zbc_in_use = true;
8581 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
8582 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
8583 	}
8584 
8585 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
8586 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
8587 	if (sdebug_dev_size_mb < 1)
8588 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
8589 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8590 	sdebug_store_sectors = sz / sdebug_sector_size;
8591 	sdebug_capacity = get_sdebug_capacity();
8592 
8593 	/* play around with geometry, don't waste too much on track 0 */
8594 	sdebug_heads = 8;
8595 	sdebug_sectors_per = 32;
8596 	if (sdebug_dev_size_mb >= 256)
8597 		sdebug_heads = 64;
8598 	else if (sdebug_dev_size_mb >= 16)
8599 		sdebug_heads = 32;
8600 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
8601 			       (sdebug_sectors_per * sdebug_heads);
8602 	if (sdebug_cylinders_per >= 1024) {
8603 		/* other LLDs do this; implies >= 1GB ram disk ... */
8604 		sdebug_heads = 255;
8605 		sdebug_sectors_per = 63;
8606 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
8607 			       (sdebug_sectors_per * sdebug_heads);
8608 	}
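	/*
	 * Worked example (assumed values, virtual_gb=0): dev_size_mb=256
	 * with sector_size=512 gives sdebug_capacity=524288 sectors;
	 * heads=64 and sectors_per=32 yield cylinders_per =
	 * 524288 / (32 * 64) = 256, under the 1024 cutoff, so the
	 * 255-head/63-sector fallback above is not taken.
	 */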
8609 	if (scsi_debug_lbp()) {
8610 		sdebug_unmap_max_blocks =
8611 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
8612 
8613 		sdebug_unmap_max_desc =
8614 			clamp(sdebug_unmap_max_desc, 0U, 256U);
8615 
8616 		sdebug_unmap_granularity =
8617 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
8618 
8619 		if (sdebug_unmap_alignment &&
8620 		    sdebug_unmap_granularity <=
8621 		    sdebug_unmap_alignment) {
8622 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
8623 			return -EINVAL;
8624 		}
8625 	}
8626 
8627 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
8628 	if (want_store) {
8629 		idx = sdebug_add_store();
8630 		if (idx < 0)
8631 			return idx;
8632 	}
8633 
8634 	pseudo_primary = root_device_register("pseudo_0");
8635 	if (IS_ERR(pseudo_primary)) {
8636 		pr_warn("root_device_register() error\n");
8637 		ret = PTR_ERR(pseudo_primary);
8638 		goto free_vm;
8639 	}
8640 	ret = bus_register(&pseudo_lld_bus);
8641 	if (ret < 0) {
8642 		pr_warn("bus_register error: %d\n", ret);
8643 		goto dev_unreg;
8644 	}
8645 	ret = driver_register(&sdebug_driverfs_driver);
8646 	if (ret < 0) {
8647 		pr_warn("driver_register error: %d\n", ret);
8648 		goto bus_unreg;
8649 	}
8650 
8651 	hosts_to_add = sdebug_add_host;
8652 	sdebug_add_host = 0;
8653 
8654 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
8655 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
8656 		pr_info("%s: failed to create initial debugfs directory\n", __func__);
8657 
8658 	for (k = 0; k < hosts_to_add; k++) {
8659 		if (want_store && k == 0) {
8660 			ret = sdebug_add_host_helper(idx);
8661 			if (ret < 0) {
8662 				pr_err("add_host_helper k=%d, error=%d\n",
8663 				       k, -ret);
8664 				break;
8665 			}
8666 		} else {
8667 			ret = sdebug_do_add_host(want_store &&
8668 						 sdebug_per_host_store);
8669 			if (ret < 0) {
8670 				pr_err("add_host k=%d error=%d\n", k, -ret);
8671 				break;
8672 			}
8673 		}
8674 	}
8675 	if (sdebug_verbose)
8676 		pr_info("built %d host(s)\n", sdebug_num_hosts);
8677 
8678 	return 0;
8679 
8680 bus_unreg:
8681 	bus_unregister(&pseudo_lld_bus);
8682 dev_unreg:
8683 	root_device_unregister(pseudo_primary);
8684 free_vm:
8685 	sdebug_erase_store(idx, NULL);
8686 	return ret;
8687 }
8688 
8689 static void __exit scsi_debug_exit(void)
8690 {
8691 	int k = sdebug_num_hosts;
8692 
8693 	for (; k; k--)
8694 		sdebug_do_remove_host(true);
8695 	driver_unregister(&sdebug_driverfs_driver);
8696 	bus_unregister(&pseudo_lld_bus);
8697 	root_device_unregister(pseudo_primary);
8698 
8699 	sdebug_erase_all_stores(false);
8700 	xa_destroy(per_store_ap);
8701 	debugfs_remove(sdebug_debugfs_root);
8702 }
8703 
8704 device_initcall(scsi_debug_init);
8705 module_exit(scsi_debug_exit);
8706 
8707 static void sdebug_release_adapter(struct device *dev)
8708 {
8709 	struct sdebug_host_info *sdbg_host;
8710 
8711 	sdbg_host = dev_to_sdebug_host(dev);
8712 	kfree(sdbg_host);
8713 }
8714 
8715 /* idx must be valid; if sip is NULL then it will be obtained using idx */
8716 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
8717 {
8718 	if (idx < 0)
8719 		return;
8720 	if (!sip) {
8721 		if (xa_empty(per_store_ap))
8722 			return;
8723 		sip = xa_load(per_store_ap, idx);
8724 		if (!sip)
8725 			return;
8726 	}
8727 	vfree(sip->map_storep);
8728 	vfree(sip->dif_storep);
8729 	vfree(sip->storep);
8730 	xa_erase(per_store_ap, idx);
8731 	kfree(sip);
8732 }
8733 
8734 /* Assume apart_from_first==false only in shutdown case. */
8735 static void sdebug_erase_all_stores(bool apart_from_first)
8736 {
8737 	unsigned long idx;
8738 	struct sdeb_store_info *sip = NULL;
8739 
8740 	xa_for_each(per_store_ap, idx, sip) {
8741 		if (apart_from_first)
8742 			apart_from_first = false;
8743 		else
8744 			sdebug_erase_store(idx, sip);
8745 	}
8746 	if (apart_from_first)
8747 		sdeb_most_recent_idx = sdeb_first_idx;
8748 }
8749 
8750 /*
8751  * Returns store xarray new element index (idx) if >=0 else negated errno.
8752  * Limit the number of stores to 65536.
8753  */
8754 static int sdebug_add_store(void)
8755 {
8756 	int res;
8757 	u32 n_idx;
8758 	unsigned long iflags;
8759 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8760 	struct sdeb_store_info *sip = NULL;
8761 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8762 
8763 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8764 	if (!sip)
8765 		return -ENOMEM;
8766 
8767 	xa_lock_irqsave(per_store_ap, iflags);
8768 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8769 	if (unlikely(res < 0)) {
8770 		xa_unlock_irqrestore(per_store_ap, iflags);
8771 		kfree(sip);
8772 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
8773 		return res;
8774 	}
8775 	sdeb_most_recent_idx = n_idx;
8776 	if (sdeb_first_idx < 0)
8777 		sdeb_first_idx = n_idx;
8778 	xa_unlock_irqrestore(per_store_ap, iflags);
8779 
8780 	res = -ENOMEM;
8781 	sip->storep = vzalloc(sz);
8782 	if (!sip->storep) {
8783 		pr_err("user data oom\n");
8784 		goto err;
8785 	}
8786 	if (sdebug_num_parts > 0)
8787 		sdebug_build_parts(sip->storep, sz);
8788 
8789 	/* DIF/DIX: what T10 calls Protection Information (PI) */
8790 	if (sdebug_dix) {
8791 		int dif_size;
8792 
8793 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8794 		sip->dif_storep = vmalloc(dif_size);
8795 
8796 		pr_info("dif_storep %u bytes @ %p\n", dif_size,
8797 			sip->dif_storep);
8798 
8799 		if (!sip->dif_storep) {
8800 			pr_err("DIX oom\n");
8801 			goto err;
8802 		}
8803 		memset(sip->dif_storep, 0xff, dif_size);
8804 	}
8805 	/* Logical Block Provisioning */
8806 	if (scsi_debug_lbp()) {
8807 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8808 		sip->map_storep = vmalloc(array_size(sizeof(long),
8809 						     BITS_TO_LONGS(map_size)));
8810 
8811 		pr_info("%lu provisioning blocks\n", map_size);
8812 
8813 		if (!sip->map_storep) {
8814 			pr_err("LBP map oom\n");
8815 			goto err;
8816 		}
8817 
8818 		bitmap_zero(sip->map_storep, map_size);
8819 
8820 		/* Map first 1KB for partition table */
8821 		if (sdebug_num_parts)
8822 			map_region(sip, 0, 2);
8823 	}
8824 
8825 	rwlock_init(&sip->macc_data_lck);
8826 	rwlock_init(&sip->macc_meta_lck);
8827 	rwlock_init(&sip->macc_sector_lck);
8828 	return (int)n_idx;
8829 err:
8830 	sdebug_erase_store((int)n_idx, sip);
8831 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
8832 	return res;
8833 }
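/*
 * Sizing sketch for the allocations above (assumed default values):
 * dev_size_mb=8 and sector_size=512 give sz = 8 * 1048576 bytes of
 * vzalloc()-ed ramdisk and 16384 store sectors; with dix set,
 * dif_storep adds 16384 * sizeof(struct t10_pi_tuple) bytes,
 * initialized to 0xff.
 */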
8834 
8835 static int sdebug_add_host_helper(int per_host_idx)
8836 {
8837 	int k, devs_per_host, idx;
8838 	int error = -ENOMEM;
8839 	struct sdebug_host_info *sdbg_host;
8840 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8841 
8842 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
8843 	if (!sdbg_host)
8844 		return -ENOMEM;
8845 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
8846 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
8847 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8848 	sdbg_host->si_idx = idx;
8849 
8850 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
8851 
8852 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
8853 	for (k = 0; k < devs_per_host; k++) {
8854 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
8855 		if (!sdbg_devinfo)
8856 			goto clean;
8857 	}
8858 
8859 	mutex_lock(&sdebug_host_list_mutex);
8860 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
8861 	mutex_unlock(&sdebug_host_list_mutex);
8862 
8863 	sdbg_host->dev.bus = &pseudo_lld_bus;
8864 	sdbg_host->dev.parent = pseudo_primary;
8865 	sdbg_host->dev.release = &sdebug_release_adapter;
8866 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
8867 
8868 	error = device_register(&sdbg_host->dev);
8869 	if (error) {
8870 		mutex_lock(&sdebug_host_list_mutex);
8871 		list_del(&sdbg_host->host_list);
8872 		mutex_unlock(&sdebug_host_list_mutex);
8873 		goto clean;
8874 	}
8875 
8876 	++sdebug_num_hosts;
8877 	return 0;
8878 
8879 clean:
8880 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8881 				 dev_list) {
8882 		list_del(&sdbg_devinfo->dev_list);
8883 		kfree(sdbg_devinfo->zstate);
8884 		kfree(sdbg_devinfo);
8885 	}
8886 	if (sdbg_host->dev.release)
8887 		put_device(&sdbg_host->dev);
8888 	else
8889 		kfree(sdbg_host);
8890 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
8891 	return error;
8892 }
8893 
8894 static int sdebug_do_add_host(bool mk_new_store)
8895 {
8896 	int ph_idx = sdeb_most_recent_idx;
8897 
8898 	if (mk_new_store) {
8899 		ph_idx = sdebug_add_store();
8900 		if (ph_idx < 0)
8901 			return ph_idx;
8902 	}
8903 	return sdebug_add_host_helper(ph_idx);
8904 }
8905 
8906 static void sdebug_do_remove_host(bool the_end)
8907 {
8908 	int idx = -1;
8909 	struct sdebug_host_info *sdbg_host = NULL;
8910 	struct sdebug_host_info *sdbg_host2;
8911 
8912 	mutex_lock(&sdebug_host_list_mutex);
8913 	if (!list_empty(&sdebug_host_list)) {
8914 		sdbg_host = list_entry(sdebug_host_list.prev,
8915 				       struct sdebug_host_info, host_list);
8916 		idx = sdbg_host->si_idx;
8917 	}
8918 	if (!the_end && idx >= 0) {
8919 		bool unique = true;
8920 
8921 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
8922 			if (sdbg_host2 == sdbg_host)
8923 				continue;
8924 			if (idx == sdbg_host2->si_idx) {
8925 				unique = false;
8926 				break;
8927 			}
8928 		}
8929 		if (unique) {
8930 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8931 			if (idx == sdeb_most_recent_idx)
8932 				--sdeb_most_recent_idx;
8933 		}
8934 	}
8935 	if (sdbg_host)
8936 		list_del(&sdbg_host->host_list);
8937 	mutex_unlock(&sdebug_host_list_mutex);
8938 
8939 	if (!sdbg_host)
8940 		return;
8941 
8942 	device_unregister(&sdbg_host->dev);
8943 	--sdebug_num_hosts;
8944 }
8945 
8946 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8947 {
8948 	struct sdebug_dev_info *devip = sdev->hostdata;
8949 
8950 	if (!devip)
8951 		return	-ENODEV;
8952 
8953 	mutex_lock(&sdebug_host_list_mutex);
8954 	block_unblock_all_queues(true);
8955 
8956 	if (qdepth > SDEBUG_CANQUEUE) {
8957 		qdepth = SDEBUG_CANQUEUE;
8958 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
8959 			qdepth, SDEBUG_CANQUEUE);
8960 	}
8961 	if (qdepth < 1)
8962 		qdepth = 1;
8963 	if (qdepth != sdev->queue_depth)
8964 		scsi_change_queue_depth(sdev, qdepth);
8965 
8966 	block_unblock_all_queues(false);
8967 	mutex_unlock(&sdebug_host_list_mutex);
8968 
8969 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8970 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
8971 
8972 	return sdev->queue_depth;
8973 }
8974 
8975 static bool fake_timeout(struct scsi_cmnd *scp)
8976 {
8977 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
8978 		if (sdebug_every_nth < -1)
8979 			sdebug_every_nth = -1;
8980 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
8981 			return true; /* ignore command causing timeout */
8982 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
8983 			 scsi_medium_access_command(scp))
8984 			return true; /* time out reads and writes */
8985 	}
8986 	return false;
8987 }
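/*
 * Example (assumed settings): with every_nth=100 and the timeout bit set
 * in opts, every 100th command (counted by sdebug_cmnd_count) is
 * silently dropped above, so the mid-layer's error handling eventually
 * sees it time out.
 */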
8988 
8989 /* Response to TUR or media access command when device stopped */
8990 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
8991 {
8992 	int stopped_state;
8993 	u64 diff_ns = 0;
8994 	ktime_t now_ts = ktime_get_boottime();
8995 	struct scsi_device *sdp = scp->device;
8996 
8997 	stopped_state = atomic_read(&devip->stopped);
8998 	if (stopped_state == 2) {
8999 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
9000 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
9001 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
9002 				/* tur_ms_to_ready timer extinguished */
9003 				atomic_set(&devip->stopped, 0);
9004 				return 0;
9005 			}
9006 		}
9007 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
9008 		if (sdebug_verbose)
9009 			sdev_printk(KERN_INFO, sdp,
9010 				    "%s: Not ready: in process of becoming ready\n", my_name);
9011 		if (scp->cmnd[0] == TEST_UNIT_READY) {
9012 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
9013 
9014 			if (diff_ns <= tur_nanosecs_to_ready)
9015 				diff_ns = tur_nanosecs_to_ready - diff_ns;
9016 			else
9017 				diff_ns = tur_nanosecs_to_ready;
9018 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
9019 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
9020 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
9021 						   diff_ns);
9022 			return check_condition_result;
9023 		}
9024 	}
9025 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
9026 	if (sdebug_verbose)
9027 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
9028 			    my_name);
9029 	return check_condition_result;
9030 }
9031 
9032 static void sdebug_map_queues(struct Scsi_Host *shost)
9033 {
9034 	int i, qoff;
9035 
9036 	if (shost->nr_hw_queues == 1)
9037 		return;
9038 
9039 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
9040 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
9041 
9042 		map->nr_queues  = 0;
9043 
9044 		if (i == HCTX_TYPE_DEFAULT)
9045 			map->nr_queues = submit_queues - poll_queues;
9046 		else if (i == HCTX_TYPE_POLL)
9047 			map->nr_queues = poll_queues;
9048 
9049 		if (!map->nr_queues) {
9050 			BUG_ON(i == HCTX_TYPE_DEFAULT);
9051 			continue;
9052 		}
9053 
9054 		map->queue_offset = qoff;
9055 		blk_mq_map_queues(map);
9056 
9057 		qoff += map->nr_queues;
9058 	}
9059 }
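/*
 * Example split for the mapping above (assumed values): submit_queues=4
 * and poll_queues=1 map HCTX_TYPE_DEFAULT to 3 queues at offset 0 and
 * HCTX_TYPE_POLL to 1 queue at offset 3; HCTX_TYPE_READ gets no queues
 * and is skipped.
 */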
9060 
9061 struct sdebug_blk_mq_poll_data {
9062 	unsigned int queue_num;
9063 	int *num_entries;
9064 };
9065 
9066 /*
9067  * We don't handle aborted commands here, but it does not seem possible to have
9068  * aborted polled commands from schedule_resp()
9069  */
9070 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
9071 {
9072 	struct sdebug_blk_mq_poll_data *data = opaque;
9073 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
9074 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9075 	struct sdebug_defer *sd_dp;
9076 	u32 unique_tag = blk_mq_unique_tag(rq);
9077 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
9078 	unsigned long flags;
9079 	int queue_num = data->queue_num;
9080 	ktime_t time;
9081 
9082 	/* We're only interested in one queue for this iteration */
9083 	if (hwq != queue_num)
9084 		return true;
9085 
9086 	/* Subsequent checks would fail if this failed, but check anyway */
9087 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
9088 		return true;
9089 
9090 	time = ktime_get_boottime();
9091 
9092 	spin_lock_irqsave(&sdsc->lock, flags);
9093 	sd_dp = &sdsc->sd_dp;
9094 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
9095 		spin_unlock_irqrestore(&sdsc->lock, flags);
9096 		return true;
9097 	}
9098 
9099 	if (time < sd_dp->cmpl_ts) {
9100 		spin_unlock_irqrestore(&sdsc->lock, flags);
9101 		return true;
9102 	}
9103 	spin_unlock_irqrestore(&sdsc->lock, flags);
9104 
9105 	if (sdebug_statistics) {
9106 		atomic_inc(&sdebug_completions);
9107 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
9108 			atomic_inc(&sdebug_miss_cpus);
9109 	}
9110 
9111 	scsi_done(cmd); /* callback to mid level */
9112 	(*data->num_entries)++;
9113 	return true;
9114 }
9115 
9116 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
9117 {
9118 	int num_entries = 0;
9119 	struct sdebug_blk_mq_poll_data data = {
9120 		.queue_num = queue_num,
9121 		.num_entries = &num_entries,
9122 	};
9123 
9124 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
9125 				&data);
9126 
9127 	if (num_entries > 0)
9128 		atomic_add(num_entries, &sdeb_mq_poll_count);
9129 	return num_entries;
9130 }
9131 
9132 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
9133 {
9134 	struct scsi_device *sdp = cmnd->device;
9135 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9136 	struct sdebug_err_inject *err;
9137 	unsigned char *cmd = cmnd->cmnd;
9138 	int ret = 0;
9139 
9140 	if (devip == NULL)
9141 		return 0;
9142 
9143 	rcu_read_lock();
9144 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9145 		if (err->type == ERR_TMOUT_CMD &&
9146 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9147 			ret = !!err->cnt;
9148 			if (err->cnt < 0)
9149 				err->cnt++;
9150 
9151 			rcu_read_unlock();
9152 			return ret;
9153 		}
9154 	}
9155 	rcu_read_unlock();
9156 
9157 	return 0;
9158 }
9159 
9160 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
9161 {
9162 	struct scsi_device *sdp = cmnd->device;
9163 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9164 	struct sdebug_err_inject *err;
9165 	unsigned char *cmd = cmnd->cmnd;
9166 	int ret = 0;
9167 
9168 	if (devip == NULL)
9169 		return 0;
9170 
9171 	rcu_read_lock();
9172 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9173 		if (err->type == ERR_FAIL_QUEUE_CMD &&
9174 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9175 			ret = err->cnt ? err->queuecmd_ret : 0;
9176 			if (err->cnt < 0)
9177 				err->cnt++;
9178 
9179 			rcu_read_unlock();
9180 			return ret;
9181 		}
9182 	}
9183 	rcu_read_unlock();
9184 
9185 	return 0;
9186 }
9187 
9188 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
9189 			   struct sdebug_err_inject *info)
9190 {
9191 	struct scsi_device *sdp = cmnd->device;
9192 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9193 	struct sdebug_err_inject *err;
9194 	unsigned char *cmd = cmnd->cmnd;
9195 	int ret = 0;
9196 	int result;
9197 
9198 	if (devip == NULL)
9199 		return 0;
9200 
9201 	rcu_read_lock();
9202 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9203 		if (err->type == ERR_FAIL_CMD &&
9204 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9205 			if (!err->cnt) {
9206 				rcu_read_unlock();
9207 				return 0;
9208 			}
9209 
9210 			ret = !!err->cnt;
9211 			rcu_read_unlock();
9212 			goto out_handle;
9213 		}
9214 	}
9215 	rcu_read_unlock();
9216 
9217 	return 0;
9218 
9219 out_handle:
9220 	if (err->cnt < 0)
9221 		err->cnt++;
9222 	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
9223 	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
9224 	*info = *err;
9225 	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
9226 
9227 	return ret;
9228 }
9229 
9230 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
9231 				   struct scsi_cmnd *scp)
9232 {
9233 	u8 sdeb_i;
9234 	struct scsi_device *sdp = scp->device;
9235 	const struct opcode_info_t *oip;
9236 	const struct opcode_info_t *r_oip;
9237 	struct sdebug_dev_info *devip;
9238 	u8 *cmd = scp->cmnd;
9239 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
9240 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
9241 	int k, na;
9242 	int errsts = 0;
9243 	u64 lun_index = sdp->lun & 0x3FFF;
9244 	u32 flags;
9245 	u16 sa;
9246 	u8 opcode = cmd[0];
9247 	u32 devsel = sdebug_get_devsel(scp->device);
9248 	bool has_wlun_rl;
9249 	bool inject_now;
9250 	int ret = 0;
9251 	struct sdebug_err_inject err;
9252 
9253 	scsi_set_resid(scp, 0);
9254 	if (sdebug_statistics) {
9255 		atomic_inc(&sdebug_cmnd_count);
9256 		inject_now = inject_on_this_cmd();
9257 	} else {
9258 		inject_now = false;
9259 	}
9260 	if (unlikely(sdebug_verbose &&
9261 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
9262 		char b[120];
9263 		int n, len, sb;
9264 
9265 		len = scp->cmd_len;
9266 		sb = (int)sizeof(b);
9267 		if (len > 32)
9268 			strcpy(b, "too long, over 32 bytes");
9269 		else {
9270 			for (k = 0, n = 0; k < len && n < sb; ++k)
9271 				n += scnprintf(b + n, sb - n, "%02x ",
9272 					       (u32)cmd[k]);
9273 		}
9274 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
9275 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
9276 	}
9277 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
9278 		return SCSI_MLQUEUE_HOST_BUSY;
9279 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
9280 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
9281 		goto err_out;
9282 
9283 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
9284 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
9285 	devip = (struct sdebug_dev_info *)sdp->hostdata;
9286 	if (unlikely(!devip)) {
9287 		devip = find_build_dev_info(sdp);
9288 		if (NULL == devip)
9289 			goto err_out;
9290 	}
9291 
9292 	if (sdebug_timeout_cmd(scp)) {
9293 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
9294 		return 0;
9295 	}
9296 
9297 	ret = sdebug_fail_queue_cmd(scp);
9298 	if (ret) {
9299 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
9300 				opcode, ret);
9301 		return ret;
9302 	}
9303 
9304 	if (sdebug_fail_cmd(scp, &ret, &err)) {
9305 		scmd_printk(KERN_INFO, scp,
9306 			"fail command 0x%x with hostbyte=0x%x, "
9307 			"driverbyte=0x%x, statusbyte=0x%x, "
9308 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
9309 			opcode, err.host_byte, err.driver_byte,
9310 			err.status_byte, err.sense_key, err.asc, err.asq);
9311 		return ret;
9312 	}
9313 
9314 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
9315 		atomic_set(&sdeb_inject_pending, 1);
9316 
9317 	na = oip->num_attached;
9318 	r_pfp = oip->pfp;
9319 	if (na) {	/* multiple commands with this opcode */
9320 		r_oip = oip;
9321 		if (FF_SA & r_oip->flags) {
9322 			if (F_SA_LOW & oip->flags)
9323 				sa = 0x1f & cmd[1];
9324 			else
9325 				sa = get_unaligned_be16(cmd + 8);
9326 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9327 				if (opcode == oip->opcode && sa == oip->sa &&
9328 					(devsel & oip->devsel) != 0)
9329 					break;
9330 			}
9331 		} else {   /* since no service action only check opcode */
9332 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9333 				if (opcode == oip->opcode &&
9334 					(devsel & oip->devsel) != 0)
9335 					break;
9336 			}
9337 		}
9338 		if (k > na) {
9339 			if (F_SA_LOW & r_oip->flags)
9340 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
9341 			else if (F_SA_HIGH & r_oip->flags)
9342 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
9343 			else
9344 				mk_sense_invalid_opcode(scp);
9345 			goto check_cond;
9346 		}
9347 	}	/* else (when na==0) we assume the oip is a match */
9348 	flags = oip->flags;
9349 	if (unlikely(F_INV_OP & flags)) {
9350 		mk_sense_invalid_opcode(scp);
9351 		goto check_cond;
9352 	}
9353 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
9354 		if (sdebug_verbose)
9355 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
9356 				    my_name, opcode, " supported for wlun");
9357 		mk_sense_invalid_opcode(scp);
9358 		goto check_cond;
9359 	}
9360 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
9361 		u8 rem;
9362 		int j;
9363 
9364 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
9365 			rem = ~oip->len_mask[k] & cmd[k];
9366 			if (rem) {
9367 				for (j = 7; j >= 0; --j, rem <<= 1) {
9368 					if (0x80 & rem)
9369 						break;
9370 				}
9371 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
9372 				goto check_cond;
9373 			}
9374 		}
9375 	}
9376 	if (unlikely(!(F_SKIP_UA & flags) &&
9377 		     find_first_bit(devip->uas_bm,
9378 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
9379 		errsts = make_ua(scp, devip);
9380 		if (errsts)
9381 			goto check_cond;
9382 	}
9383 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
9384 		     atomic_read(&devip->stopped))) {
9385 		errsts = resp_not_ready(scp, devip);
9386 		if (errsts)
9387 			goto fini;
9388 	}
9389 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
9390 		goto fini;
9391 	if (unlikely(sdebug_every_nth)) {
9392 		if (fake_timeout(scp))
9393 			return 0;	/* ignore command: make trouble */
9394 	}
9395 	if (likely(oip->pfp))
9396 		pfp = oip->pfp;	/* calls a resp_* function */
9397 	else
9398 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
9399 
9400 fini:
9401 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
9402 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
9403 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
9404 					    sdebug_ndelay > 10000)) {
9405 		/*
9406 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
9407 		 * for Start Stop Unit (SSU) want at least 1 second delay and
9408 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
9409 		 * For Synchronize Cache want 1/20 of SSU's delay.
9410 		 */
9411 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
9412 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
9413 
9414 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
9415 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
9416 	} else
9417 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
9418 				     sdebug_ndelay);
9419 check_cond:
9420 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
9421 err_out:
9422 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
9423 }
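/*
 * Worked example for the F_LONG_DELAY conversion in
 * scsi_debug_queuecommand() above (assuming USER_HZ=100 and HZ=250):
 * jdelay=2 with F_SYNC_DELAY set gives
 * mult_frac(100 * 2, 250, 20 * 100) = 25 jiffies, i.e. 100 ms, which is
 * 1/20 of the 2 second Start Stop Unit delay.
 */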
9424 
9425 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
9426 {
9427 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9428 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
9429 
9430 	spin_lock_init(&sdsc->lock);
9431 	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
9432 		      HRTIMER_MODE_REL_PINNED);
9433 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
9434 
9435 	return 0;
9436 }
9437 
9438 static const struct scsi_host_template sdebug_driver_template = {
9439 	.show_info =		scsi_debug_show_info,
9440 	.write_info =		scsi_debug_write_info,
9441 	.proc_name =		sdebug_proc_name,
9442 	.name =			"SCSI DEBUG",
9443 	.info =			scsi_debug_info,
9444 	.sdev_init =		scsi_debug_sdev_init,
9445 	.sdev_configure =	scsi_debug_sdev_configure,
9446 	.sdev_destroy =		scsi_debug_sdev_destroy,
9447 	.ioctl =		scsi_debug_ioctl,
9448 	.queuecommand =		scsi_debug_queuecommand,
9449 	.change_queue_depth =	sdebug_change_qdepth,
9450 	.map_queues =		sdebug_map_queues,
9451 	.mq_poll =		sdebug_blk_mq_poll,
9452 	.eh_abort_handler =	scsi_debug_abort,
9453 	.eh_device_reset_handler = scsi_debug_device_reset,
9454 	.eh_target_reset_handler = scsi_debug_target_reset,
9455 	.eh_bus_reset_handler = scsi_debug_bus_reset,
9456 	.eh_host_reset_handler = scsi_debug_host_reset,
9457 	.can_queue =		SDEBUG_CANQUEUE,
9458 	.this_id =		7,
9459 	.sg_tablesize =		SG_MAX_SEGMENTS,
9460 	.cmd_per_lun =		DEF_CMD_PER_LUN,
9461 	.max_sectors =		-1U,
9462 	.max_segment_size =	-1U,
9463 	.module =		THIS_MODULE,
9464 	.skip_settle_delay =	1,
9465 	.track_queue_depth =	1,
9466 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
9467 	.init_cmd_priv = sdebug_init_cmd_priv,
9468 	.target_alloc =		sdebug_target_alloc,
9469 	.target_destroy =	sdebug_target_destroy,
9470 };
9471 
9472 static int sdebug_driver_probe(struct device *dev)
9473 {
9474 	int error = 0;
9475 	struct sdebug_host_info *sdbg_host;
9476 	struct Scsi_Host *hpnt;
9477 	int hprot;
9478 
9479 	sdbg_host = dev_to_sdebug_host(dev);
9480 
9481 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
9482 	if (NULL == hpnt) {
9483 		pr_err("scsi_host_alloc failed\n");
9484 		error = -ENODEV;
9485 		return error;
9486 	}
9487 	hpnt->can_queue = sdebug_max_queue;
9488 	hpnt->cmd_per_lun = sdebug_max_queue;
9489 	if (!sdebug_clustering)
9490 		hpnt->dma_boundary = PAGE_SIZE - 1;
9491 
9492 	if (submit_queues > nr_cpu_ids) {
9493 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
9494 			my_name, submit_queues, nr_cpu_ids);
9495 		submit_queues = nr_cpu_ids;
9496 	}
9497 	/*
9498 	 * Decide whether to tell scsi subsystem that we want mq. The
9499 	 * following should give the same answer for each host.
9500 	 */
9501 	hpnt->nr_hw_queues = submit_queues;
9502 	if (sdebug_host_max_queue)
9503 		hpnt->host_tagset = 1;
9504 
9505 	/* poll queues are possible for nr_hw_queues > 1 */
9506 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
9507 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
9508 			 my_name, poll_queues, hpnt->nr_hw_queues);
9509 		poll_queues = 0;
9510 	}
9511 
9512 	/*
9513 	 * Poll queues don't need interrupts, but we need at least one I/O queue
9514 	 * left over for non-polled I/O.
9515 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
9516 	 */
9517 	if (poll_queues >= submit_queues) {
9518 		if (submit_queues < 3)
9519 			pr_warn("%s: trim poll_queues to 1\n", my_name);
9520 		else
9521 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
9522 				my_name, submit_queues - 1);
9523 		poll_queues = 1;
9524 	}
9525 	if (poll_queues)
9526 		hpnt->nr_maps = 3;
9527 
9528 	sdbg_host->shost = hpnt;
9529 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
9530 		hpnt->max_id = sdebug_num_tgts + 1;
9531 	else
9532 		hpnt->max_id = sdebug_num_tgts;
9533 	/* = sdebug_max_luns; */
9534 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
9535 
9536 	hprot = 0;
9537 
9538 	switch (sdebug_dif) {
9539 
9540 	case T10_PI_TYPE1_PROTECTION:
9541 		hprot = SHOST_DIF_TYPE1_PROTECTION;
9542 		if (sdebug_dix)
9543 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
9544 		break;
9545 
9546 	case T10_PI_TYPE2_PROTECTION:
9547 		hprot = SHOST_DIF_TYPE2_PROTECTION;
9548 		if (sdebug_dix)
9549 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
9550 		break;
9551 
9552 	case T10_PI_TYPE3_PROTECTION:
9553 		hprot = SHOST_DIF_TYPE3_PROTECTION;
9554 		if (sdebug_dix)
9555 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
9556 		break;
9557 
9558 	default:
9559 		if (sdebug_dix)
9560 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
9561 		break;
9562 	}
9563 
9564 	scsi_host_set_prot(hpnt, hprot);
9565 
9566 	if (have_dif_prot || sdebug_dix)
9567 		pr_info("host protection%s%s%s%s%s%s%s\n",
9568 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
9569 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
9570 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
9571 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
9572 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
9573 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
9574 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
9575 
9576 	if (sdebug_guard == 1)
9577 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
9578 	else
9579 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
9580 
9581 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
9582 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
9583 	if (sdebug_every_nth)	/* need stats counters for every_nth */
9584 		sdebug_statistics = true;
9585 	error = scsi_add_host(hpnt, &sdbg_host->dev);
9586 	if (error) {
9587 		pr_err("scsi_add_host failed\n");
9588 		error = -ENODEV;
9589 		scsi_host_put(hpnt);
9590 	} else {
9591 		scsi_scan_host(hpnt);
9592 	}
9593 
9594 	return error;
9595 }
9596 
9597 static void sdebug_driver_remove(struct device *dev)
9598 {
9599 	struct sdebug_host_info *sdbg_host;
9600 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
9601 
9602 	sdbg_host = dev_to_sdebug_host(dev);
9603 
9604 	scsi_remove_host(sdbg_host->shost);
9605 
9606 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
9607 				 dev_list) {
9608 		list_del(&sdbg_devinfo->dev_list);
9609 		kfree(sdbg_devinfo->zstate);
9610 		kfree(sdbg_devinfo);
9611 	}
9612 
9613 	scsi_host_put(sdbg_host->shost);
9614 }
9615 
9616 static const struct bus_type pseudo_lld_bus = {
9617 	.name = "pseudo",
9618 	.probe = sdebug_driver_probe,
9619 	.remove = sdebug_driver_remove,
9620 	.drv_groups = sdebug_drv_groups,
9621 };
9622