Lines Matching +full:ssc-block-bus
1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
33 #include <linux/crc-t10dif.h>
39 #include <linux/t10-pi.h>
88 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
145 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
159 #define JDELAY_OVERRIDDEN -9999
197 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
201 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
219 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
227 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
228 #define F_D_IN 1 /* Data-in command (e.g. READ) */
229 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
232 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
240 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
257 #define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
258 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
386 dev_to_sdebug_host(shost->dma_dev)
424 u32 flags; /* OR-ed set of SDEB_F_* */
427 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
454 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
464 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
470 /* 0x0; 0x0->0x1f: 6 byte cdbs */
478 /* 0x20; 0x20->0x3f: 10 byte cdbs */
483 /* 0x40; 0x40->0x5f: 10 byte cdbs */
489 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
493 /* 0x80; 0x80->0x9f: 16 byte cdbs */
500 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
507 /* 0xc0; 0xc0->0xff: vendor specific */
515 * The following "response" functions return the SCSI mid-level's 4 byte
516 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
654 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
774 0, 0, 0, 0} }, /* PRE-FETCH (10) */
875 static int sdeb_first_idx = -1; /* invalid index ==> none created */
876 static int sdeb_most_recent_idx = -1;
890 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
896 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
906 .bus = &pseudo_lld_bus,
932 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_err_add()
935 spin_lock(&devip->list_lock); in sdebug_err_add()
936 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_err_add()
937 if (err->type == new->type && err->cmd == new->cmd) { in sdebug_err_add()
938 list_del_rcu(&err->list); in sdebug_err_add()
939 call_rcu(&err->rcu, sdebug_err_free); in sdebug_err_add()
943 list_add_tail_rcu(&new->list, &devip->inject_err_list); in sdebug_err_add()
944 spin_unlock(&devip->list_lock); in sdebug_err_add()
949 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_err_remove()
954 if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) { in sdebug_err_remove()
956 return -EINVAL; in sdebug_err_remove()
959 spin_lock(&devip->list_lock); in sdebug_err_remove()
960 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_err_remove()
961 if (err->type == type && err->cmd == cmd) { in sdebug_err_remove()
962 list_del_rcu(&err->list); in sdebug_err_remove()
963 call_rcu(&err->rcu, sdebug_err_free); in sdebug_err_remove()
964 spin_unlock(&devip->list_lock); in sdebug_err_remove()
969 spin_unlock(&devip->list_lock); in sdebug_err_remove()
972 return -EINVAL; in sdebug_err_remove()
977 struct scsi_device *sdev = (struct scsi_device *)m->private; in sdebug_error_show()
978 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_error_show()
984 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_error_show()
985 switch (err->type) { in sdebug_error_show()
989 seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt, in sdebug_error_show()
990 err->cmd); in sdebug_error_show()
994 seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type, in sdebug_error_show()
995 err->cnt, err->cmd, err->queuecmd_ret); in sdebug_error_show()
1000 err->type, err->cnt, err->cmd, in sdebug_error_show()
1001 err->host_byte, err->driver_byte, in sdebug_error_show()
1002 err->status_byte, err->sense_key, in sdebug_error_show()
1003 err->asc, err->asq); in sdebug_error_show()
1014 return single_open(file, sdebug_error_show, inode->i_private); in sdebug_error_open()
1023 struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private; in sdebug_error_write()
1027 return -ENOMEM; in sdebug_error_write()
1031 return -EFAULT; in sdebug_error_write()
1034 if (buf[0] == '-') in sdebug_error_write()
1039 return -EINVAL; in sdebug_error_write()
1045 return -ENOMEM; in sdebug_error_write()
1052 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt, in sdebug_error_write()
1053 &inject->cmd) != 3) in sdebug_error_write()
1058 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt, in sdebug_error_write()
1059 &inject->cmd, &inject->queuecmd_ret) != 4) in sdebug_error_write()
1065 &inject->type, &inject->cnt, &inject->cmd, in sdebug_error_write()
1066 &inject->host_byte, &inject->driver_byte, in sdebug_error_write()
1067 &inject->status_byte, &inject->sense_key, in sdebug_error_write()
1068 &inject->asc, &inject->asq) != 9) in sdebug_error_write()
1085 return -EINVAL; in sdebug_error_write()
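The sscanf() calls above define the text format accepted by the per-device "error" debugfs file: a line beginning with '-' removes a rule ("- type opcode"), otherwise the line starts with "type count opcode" and, depending on the type, may append either a queuecommand return value or a full host/driver/status/sense tuple. A minimal user-space sketch of writing one rule is shown below; the debugfs mount point and the "scsi_debug/<h:c:t:l>" directory layout are assumptions not confirmed by this excerpt, and the type value is purely illustrative (its meaning comes from an enum not shown here).

/* Hedged sketch: write one injection rule ("type cnt opcode") to the
 * per-device "error" debugfs file; the path layout is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/scsi_debug/0:0:0:0/error"; /* assumed layout */
	const char *rule = "0 1 0x12\n"; /* type (illustrative), cnt=1, INQUIRY opcode */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, rule, strlen(rule)) < 0)
		perror("write");
	close(fd);
	return 0;
}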
1097 struct scsi_target *starget = (struct scsi_target *)m->private; in sdebug_target_reset_fail_show()
1099 (struct sdebug_target_info *)starget->hostdata; in sdebug_target_reset_fail_show()
1102 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N'); in sdebug_target_reset_fail_show()
1109 return single_open(file, sdebug_target_reset_fail_show, inode->i_private); in sdebug_target_reset_fail_open()
1117 (struct scsi_target *)file->f_inode->i_private; in sdebug_target_reset_fail_write()
1119 (struct sdebug_target_info *)starget->hostdata; in sdebug_target_reset_fail_write()
1122 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail); in sdebug_target_reset_fail_write()
1125 return -ENODEV; in sdebug_target_reset_fail_write()
1141 return -ENOMEM; in sdebug_target_alloc()
1143 targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev), in sdebug_target_alloc()
1146 debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget, in sdebug_target_alloc()
1149 starget->hostdata = targetip; in sdebug_target_alloc()
1158 debugfs_remove(targetip->debugfs_entry); in sdebug_tartget_cleanup_async()
1166 targetip = (struct sdebug_target_info *)starget->hostdata; in sdebug_target_destroy()
1168 starget->hostdata = NULL; in sdebug_target_destroy()
1173 /* Only do the extra work involved in logical block provisioning if one or
1189 if (!sip || !sip->storep) { in lba2fake_store()
1193 return lsip->storep + lba * sdebug_sector_size; in lba2fake_store()
1201 return sip->dif_storep + sector; in dif_store()
1211 hpnt = sdbg_host->shost; in sdebug_max_tgts_luns()
1212 if ((hpnt->this_id >= 0) && in sdebug_max_tgts_luns()
1213 (sdebug_num_tgts > hpnt->this_id)) in sdebug_max_tgts_luns()
1214 hpnt->max_id = sdebug_num_tgts + 1; in sdebug_max_tgts_luns()
1216 hpnt->max_id = sdebug_num_tgts; in sdebug_max_tgts_luns()
1218 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; in sdebug_max_tgts_luns()
1225 /* Set in_bit to -1 to indicate no bit position of invalid field */
1234 sbuff = scp->sense_buffer; in mk_sense_invalid_fld()
1236 sdev_printk(KERN_ERR, scp->device, in mk_sense_invalid_fld()
1261 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" in mk_sense_invalid_fld()
1268 if (!scp->sense_buffer) { in mk_sense_buffer()
1269 sdev_printk(KERN_ERR, scp->device, in mk_sense_buffer()
1273 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in mk_sense_buffer()
1278 sdev_printk(KERN_INFO, scp->device, in mk_sense_buffer()
1303 return -EINVAL; in scsi_debug_ioctl()
1304 /* return -ENOTTY; // correct return but upsets fdisk */ in scsi_debug_ioctl()
1311 sdev->use_10_for_rw = false; in config_cdb_len()
1312 sdev->use_16_for_rw = false; in config_cdb_len()
1313 sdev->use_10_for_ms = false; in config_cdb_len()
1316 sdev->use_10_for_rw = true; in config_cdb_len()
1317 sdev->use_16_for_rw = false; in config_cdb_len()
1318 sdev->use_10_for_ms = false; in config_cdb_len()
1321 sdev->use_10_for_rw = true; in config_cdb_len()
1322 sdev->use_16_for_rw = false; in config_cdb_len()
1323 sdev->use_10_for_ms = true; in config_cdb_len()
1326 sdev->use_10_for_rw = false; in config_cdb_len()
1327 sdev->use_16_for_rw = true; in config_cdb_len()
1328 sdev->use_10_for_ms = true; in config_cdb_len()
1331 sdev->use_10_for_rw = false; in config_cdb_len()
1332 sdev->use_16_for_rw = true; in config_cdb_len()
1333 sdev->use_10_for_ms = true; in config_cdb_len()
1338 sdev->use_10_for_rw = true; in config_cdb_len()
1339 sdev->use_16_for_rw = false; in config_cdb_len()
1340 sdev->use_10_for_ms = false; in config_cdb_len()
1354 shost = sdbg_host->shost; in all_config_cdb_len()
1364 struct sdebug_host_info *sdhp = devip->sdbg_host; in clear_luns_changed_on_target()
1367 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { in clear_luns_changed_on_target()
1368 if ((devip->sdbg_host == dp->sdbg_host) && in clear_luns_changed_on_target()
1369 (devip->target == dp->target)) { in clear_luns_changed_on_target()
1370 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); in clear_luns_changed_on_target()
1379 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); in make_ua()
1400 cp = "bus reset"; in make_ua()
1430 * SPC-3 behavior is to report a UNIT ATTENTION with in make_ua()
1433 * received. SPC-4 behavior is to report it only once. in make_ua()
1435 * values as struct scsi_device->scsi_level. in make_ua()
1437 if (sdebug_scsi_level >= 6) /* SPC-4 and above */ in make_ua()
1451 clear_bit(k, devip->uas_bm); in make_ua()
1453 sdev_printk(KERN_INFO, scp->device, in make_ua()
1461 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1466 struct scsi_data_buffer *sdb = &scp->sdb; in fill_from_dev_buffer()
1468 if (!sdb->length) in fill_from_dev_buffer()
1470 if (scp->sc_data_direction != DMA_FROM_DEVICE) in fill_from_dev_buffer()
1473 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, in fill_from_dev_buffer()
1475 scsi_set_resid(scp, scsi_bufflen(scp) - act_len); in fill_from_dev_buffer()
1480 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1481 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1489 struct scsi_data_buffer *sdb = &scp->sdb; in p_fill_from_dev_buffer()
1492 if (sdb->length <= off_dst) in p_fill_from_dev_buffer()
1494 if (scp->sc_data_direction != DMA_FROM_DEVICE) in p_fill_from_dev_buffer()
1497 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, in p_fill_from_dev_buffer()
1502 n = scsi_bufflen(scp) - (off_dst + act_len); in p_fill_from_dev_buffer()
1507 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1508 * 'arr' or -1 if error.
1515 if (scp->sc_data_direction != DMA_TO_DEVICE) in fetch_to_dev_buffer()
1516 return -1; in fetch_to_dev_buffer()
1562 /* NAA-3, Logical unit identifier (binary) */ in inquiry_vpd_83()
1580 /* NAA-3, Target port identifier */ in inquiry_vpd_83()
1587 /* NAA-3, Target port group identifier */ in inquiry_vpd_83()
1596 /* NAA-3, Target device identifier */ in inquiry_vpd_83()
1604 arr[num++] = 0x63; /* proto=sas, UTF-8 */ in inquiry_vpd_83()
1648 memset(arr + num + olen, 0, plen - olen); in inquiry_vpd_85()
1660 memset(arr + num + olen, 0, plen - olen); in inquiry_vpd_85()
1682 /* naa-5 target port identifier (A) */ in inquiry_vpd_88()
1697 /* naa-5 target port identifier (B) */ in inquiry_vpd_88()
1768 /* Block limits VPD page (SBC-3) */
1794 /* Maximum Unmap Block Descriptor Count */ in inquiry_vpd_b0()
1810 return 0x3c; /* Mandatory page length for Logical Block Provisioning */ in inquiry_vpd_b0()
1813 /* Block device characteristics VPD page (SBC-3) */
1825 /* Logical block provisioning VPD page (SBC-4) */
1844 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1851 * Optimal number of non-sequentially written sequential write in inquiry_vpd_b6()
1857 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open) in inquiry_vpd_b6()
1858 put_unaligned_be32(devip->max_open, &arr[12]); in inquiry_vpd_b6()
1861 if (devip->zcap < devip->zsize) { in inquiry_vpd_b6()
1863 put_unaligned_be64(devip->zsize, &arr[20]); in inquiry_vpd_b6()
1877 unsigned char *cmd = scp->cmnd; in resp_inquiry()
1887 is_zbc = devip->zoned; in resp_inquiry()
1889 have_wlun = scsi_is_wlun(scp->device->lun); in resp_inquiry()
1892 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) in resp_inquiry()
1905 int host_no = devip->sdbg_host->shost->host_no; in resp_inquiry()
1908 (devip->channel & 0x7f); in resp_inquiry()
1911 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + in resp_inquiry()
1912 (devip->target * 1000) + devip->lun); in resp_inquiry()
1914 (devip->target * 1000) - 3; in resp_inquiry()
1929 arr[n++] = 0xb0; /* Block limits */ in resp_inquiry()
1930 arr[n++] = 0xb1; /* Block characteristics */ in resp_inquiry()
1936 arr[3] = n - 4; /* number of supported VPD pages */ in resp_inquiry()
1946 &devip->lu_name); in resp_inquiry()
1966 arr[4] = 0x2; /* disconnect-reconnect mp */ in resp_inquiry()
1977 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */ in resp_inquiry()
1980 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */ in resp_inquiry()
1990 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_inquiry()
2004 arr[4] = SDEBUG_LONG_INQ_SZ - 5; in resp_inquiry()
2017 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ in resp_inquiry()
2018 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ in resp_inquiry()
2020 if (is_disk) { /* SBC-4 no version claimed */ in resp_inquiry()
2023 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ in resp_inquiry()
2030 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ in resp_inquiry()
2044 unsigned char *cmd = scp->cmnd; in resp_requests()
2049 int stopped_state = atomic_read(&devip->stopped); in resp_requests()
2097 unsigned char *cmd = scp->cmnd; in resp_start_stop()
2107 stopped_state = atomic_read(&devip->stopped); in resp_start_stop()
2111 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { in resp_start_stop()
2112 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); in resp_start_stop()
2116 atomic_set(&devip->stopped, 0); in resp_start_stop()
2131 atomic_xchg(&devip->stopped, want_stop); in resp_start_stop()
2160 capac = (unsigned int)sdebug_capacity - 1; in resp_readcap()
2172 unsigned char *cmd = scp->cmnd; in resp_readcap16()
2180 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0); in resp_readcap16()
2197 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. in resp_readcap16()
2199 if (devip->zoned) in resp_readcap16()
2205 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */ in resp_readcap16()
2218 unsigned char *cmd = scp->cmnd; in resp_report_tgtpgs()
2220 int host_no = devip->sdbg_host->shost->host_no; in resp_report_tgtpgs()
2238 (devip->channel & 0x7f); in resp_report_tgtpgs()
2240 (devip->channel & 0x7f) + 0x80; in resp_report_tgtpgs()
2276 rlen = n - 4; in resp_report_tgtpgs()
2281 * - The allocated length in resp_report_tgtpgs()
2282 * - The constructed command length in resp_report_tgtpgs()
2283 * - The maximum array size in resp_report_tgtpgs()
2303 u8 *cmd = scp->cmnd; in resp_rsup_opcodes()
2311 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_rsup_opcodes()
2328 oip->num_attached != 0xff; ++oip) { in resp_rsup_opcodes()
2329 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2331 count += (oip->num_attached + 1); in resp_rsup_opcodes()
2336 oip->num_attached != 0xff && offset < a_len; ++oip) { in resp_rsup_opcodes()
2337 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2339 na = oip->num_attached; in resp_rsup_opcodes()
2340 arr[offset] = oip->opcode; in resp_rsup_opcodes()
2341 put_unaligned_be16(oip->sa, arr + offset + 2); in resp_rsup_opcodes()
2344 if (FF_SA & oip->flags) in resp_rsup_opcodes()
2346 put_unaligned_be16(oip->len_mask[0], arr + offset + 6); in resp_rsup_opcodes()
2350 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { in resp_rsup_opcodes()
2351 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2354 arr[offset] = oip->opcode; in resp_rsup_opcodes()
2355 put_unaligned_be16(oip->sa, arr + offset + 2); in resp_rsup_opcodes()
2358 if (FF_SA & oip->flags) in resp_rsup_opcodes()
2360 put_unaligned_be16(oip->len_mask[0], in resp_rsup_opcodes()
2375 if (F_INV_OP & oip->flags) { in resp_rsup_opcodes()
2380 if (FF_SA & oip->flags) { in resp_rsup_opcodes()
2388 0 == (FF_SA & oip->flags)) { in resp_rsup_opcodes()
2389 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); in resp_rsup_opcodes()
2393 if (0 == (FF_SA & oip->flags) && in resp_rsup_opcodes()
2394 req_opcode == oip->opcode) in resp_rsup_opcodes()
2396 else if (0 == (FF_SA & oip->flags)) { in resp_rsup_opcodes()
2397 na = oip->num_attached; in resp_rsup_opcodes()
2398 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
2400 if (req_opcode == oip->opcode) in resp_rsup_opcodes()
2404 } else if (req_sa != oip->sa) { in resp_rsup_opcodes()
2405 na = oip->num_attached; in resp_rsup_opcodes()
2406 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
2408 if (req_sa == oip->sa) in resp_rsup_opcodes()
2415 u = oip->len_mask[0]; in resp_rsup_opcodes()
2417 arr[4] = oip->opcode; in resp_rsup_opcodes()
2420 oip->len_mask[k] : 0xff; in resp_rsup_opcodes()
2449 u8 *cmd = scp->cmnd; in resp_rsup_tmfs()
2455 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_rsup_tmfs()
2473 { /* Read-Write Error Recovery page for mode_sense */ in resp_err_recov_pg()
2479 memset(p + 2, 0, sizeof(err_recov_pg) - 2); in resp_err_recov_pg()
2484 { /* Disconnect-Reconnect page for mode_sense */ in resp_disconnect_pg()
2490 memset(p + 2, 0, sizeof(disconnect_pg) - 2); in resp_disconnect_pg()
2506 memset(p + 2, 0, sizeof(format_pg) - 2); in resp_format_pg()
2574 { /* SAS SSP mode page - short format for mode_sense */ in resp_sas_sf_m_pg()
2580 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2); in resp_sas_sf_m_pg()
2614 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); in resp_sas_pcd_m_spg()
2626 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4); in resp_sas_sha_m_spg()
2639 int target = scp->device->id; in resp_mode_sense()
2642 unsigned char *cmd = scp->cmnd; in resp_mode_sense()
2645 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */ in resp_mode_sense()
2652 is_zbc = devip->zoned; in resp_mode_sense()
2663 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + in resp_mode_sense()
2664 (devip->target * 1000) - 3; in resp_mode_sense()
2667 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ in resp_mode_sense()
2704 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_mode_sense()
2710 case 0x1: /* Read-Write error recovery page, direct access */ in resp_mode_sense()
2714 case 0x2: /* Disconnect-Reconnect page, all devices */ in resp_mode_sense()
2738 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_mode_sense()
2778 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_mode_sense()
2791 arr[0] = offset - 1; in resp_mode_sense()
2793 put_unaligned_be16((offset - 2), arr + 0); in resp_mode_sense()
2805 unsigned char *cmd = scp->cmnd; in resp_mode_select()
2813 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); in resp_mode_select()
2817 if (-1 == res) in resp_mode_select()
2820 sdev_printk(KERN_INFO, scp->device, in resp_mode_select()
2827 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); in resp_mode_select()
2848 sizeof(caching_pg) - 2); in resp_mode_select()
2855 sizeof(ctrl_m_pg) - 2); in resp_mode_select()
2867 sizeof(iec_m_pg) - 2); in resp_mode_select()
2877 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); in resp_mode_select()
2924 unsigned char *cmd = scp->cmnd; in resp_log_sense()
2944 arr[3] = n - 4; in resp_log_sense()
2976 arr[3] = n - 4; in resp_log_sense()
2986 arr[3] = n - 4; in resp_log_sense()
2994 arr[3] = n - 4; in resp_log_sense()
3010 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_log_sense()
3020 return devip->nr_zones != 0; in sdebug_dev_is_zoned()
3026 u32 zno = lba >> devip->zsize_shift; in zbc_zone()
3029 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones) in zbc_zone()
3030 return &devip->zstate[zno]; in zbc_zone()
3036 zno = 2 * zno - devip->nr_conv_zones; in zbc_zone()
3037 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones); in zbc_zone()
3038 zsp = &devip->zstate[zno]; in zbc_zone()
3039 if (lba >= zsp->z_start + zsp->z_size) in zbc_zone()
3041 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size); in zbc_zone()
3047 return zsp->z_type == ZBC_ZTYPE_CNV; in zbc_zone_is_conv()
3052 return zsp->z_type == ZBC_ZTYPE_GAP; in zbc_zone_is_gap()
3068 zc = zsp->z_cond; in zbc_close_zone()
3073 devip->nr_imp_open--; in zbc_close_zone()
3075 devip->nr_exp_open--; in zbc_close_zone()
3077 if (zsp->z_wp == zsp->z_start) { in zbc_close_zone()
3078 zsp->z_cond = ZC1_EMPTY; in zbc_close_zone()
3080 zsp->z_cond = ZC4_CLOSED; in zbc_close_zone()
3081 devip->nr_closed++; in zbc_close_zone()
3087 struct sdeb_zone_state *zsp = &devip->zstate[0]; in zbc_close_imp_open_zone()
3090 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_close_imp_open_zone()
3091 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) { in zbc_close_imp_open_zone()
3106 zc = zsp->z_cond; in zbc_open_zone()
3112 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN) in zbc_open_zone()
3114 else if (devip->max_open && in zbc_open_zone()
3115 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open) in zbc_open_zone()
3118 if (zsp->z_cond == ZC4_CLOSED) in zbc_open_zone()
3119 devip->nr_closed--; in zbc_open_zone()
3121 zsp->z_cond = ZC3_EXPLICIT_OPEN; in zbc_open_zone()
3122 devip->nr_exp_open++; in zbc_open_zone()
3124 zsp->z_cond = ZC2_IMPLICIT_OPEN; in zbc_open_zone()
3125 devip->nr_imp_open++; in zbc_open_zone()
3132 switch (zsp->z_cond) { in zbc_set_zone_full()
3134 devip->nr_imp_open--; in zbc_set_zone_full()
3137 devip->nr_exp_open--; in zbc_set_zone_full()
3141 zsp->z_start, zsp->z_cond); in zbc_set_zone_full()
3144 zsp->z_cond = ZC5_FULL; in zbc_set_zone_full()
3151 unsigned long long n, end, zend = zsp->z_start + zsp->z_size; in zbc_inc_wp()
3156 if (zsp->z_type == ZBC_ZTYPE_SWR) { in zbc_inc_wp()
3157 zsp->z_wp += num; in zbc_inc_wp()
3158 if (zsp->z_wp >= zend) in zbc_inc_wp()
3164 if (lba != zsp->z_wp) in zbc_inc_wp()
3165 zsp->z_non_seq_resource = true; in zbc_inc_wp()
3169 n = zend - lba; in zbc_inc_wp()
3170 zsp->z_wp = zend; in zbc_inc_wp()
3171 } else if (end > zsp->z_wp) { in zbc_inc_wp()
3173 zsp->z_wp = end; in zbc_inc_wp()
3177 if (zsp->z_wp >= zend) in zbc_inc_wp()
3180 num -= n; in zbc_inc_wp()
3184 zend = zsp->z_start + zsp->z_size; in zbc_inc_wp()
3192 struct scsi_device *sdp = scp->device; in check_zbc_access_params()
3193 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in check_zbc_access_params()
3195 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1); in check_zbc_access_params()
3198 /* For host-managed, reads cannot cross zone types boundaries */ in check_zbc_access_params()
3199 if (zsp->z_type != zsp_end->z_type) { in check_zbc_access_params()
3226 if (zsp->z_type == ZBC_ZTYPE_SWR) { in check_zbc_access_params()
3235 if (zsp->z_cond == ZC5_FULL) { in check_zbc_access_params()
3241 if (lba != zsp->z_wp) { in check_zbc_access_params()
3250 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) { in check_zbc_access_params()
3251 if (devip->max_open && in check_zbc_access_params()
3252 devip->nr_exp_open >= devip->max_open) { in check_zbc_access_params()
3268 struct scsi_device *sdp = scp->device; in check_device_access_params()
3269 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in check_device_access_params()
3275 /* transfer length excessive (tie in to block limits VPD page) */ in check_device_access_params()
3304 return xa_load(per_store_ap, devip->sdbg_host->si_idx); in devip2sip()
3307 /* Returns number of bytes copied or -1 if error. */
3312 u64 block, rest = 0; in do_device_access() local
3314 struct scsi_data_buffer *sdb = &scp->sdb; in do_device_access()
3324 if (!sdb->length || !sip) in do_device_access()
3326 if (scp->sc_data_direction != dir) in do_device_access()
3327 return -1; in do_device_access()
3328 fsp = sip->storep; in do_device_access()
3330 block = do_div(lba, sdebug_store_sectors); in do_device_access()
3331 if (block + num > sdebug_store_sectors) in do_device_access()
3332 rest = block + num - sdebug_store_sectors; in do_device_access()
3334 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, in do_device_access()
3335 fsp + (block * sdebug_sector_size), in do_device_access()
3336 (num - rest) * sdebug_sector_size, sg_skip, do_write); in do_device_access()
3337 if (ret != (num - rest) * sdebug_sector_size) in do_device_access()
3341 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, in do_device_access()
3343 sg_skip + ((num - rest) * sdebug_sector_size), in do_device_access()
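do_device_access() above treats the backing store as a ring: do_div() yields the starting block within the store, and when block + num runs past sdebug_store_sectors the transfer is split into a tail piece plus a wrap-around piece that starts again at block 0. A stand-alone sketch of the same split follows, using illustrative sizes rather than the driver's globals and plain memcpy() in place of the scatter-gather copy.

/* Hedged sketch of the two-piece copy used when a virtual LBA range
 * wraps around the end of a smaller circular backing store. */
#include <stdio.h>
#include <string.h>

static void ring_copy(unsigned char *store, unsigned long long store_sectors,
		      unsigned int sector_size, unsigned long long lba,
		      unsigned int num, const unsigned char *src)
{
	unsigned long long block = lba % store_sectors;   /* kernel uses do_div() */
	unsigned long long rest = 0;

	if (block + num > store_sectors)
		rest = block + num - store_sectors;

	memcpy(store + block * sector_size, src,
	       (unsigned long long)(num - rest) * sector_size);
	if (rest)	/* wrapped part lands at the start of the store */
		memcpy(store, src + (unsigned long long)(num - rest) * sector_size,
		       rest * (unsigned long long)sector_size);
}

int main(void)
{
	unsigned char store[8 * 512] = { 0 };
	unsigned char src[3 * 512];

	memset(src, 0xaa, sizeof(src));
	/* 3 sectors starting at virtual LBA 7 of an 8-sector store:
	 * 1 sector goes to block 7, the remaining 2 wrap to blocks 0..1. */
	ring_copy(store, 8, 512, 7, 3, src);
	printf("store[0]=0x%02x store[7*512]=0x%02x\n", store[0], store[7 * 512]);
	return 0;
}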
3350 /* Returns number of bytes copied or -1 if error. */
3353 struct scsi_data_buffer *sdb = &scp->sdb; in do_dout_fetch()
3355 if (!sdb->length) in do_dout_fetch()
3357 if (scp->sc_data_direction != DMA_TO_DEVICE) in do_dout_fetch()
3358 return -1; in do_dout_fetch()
3359 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp, in do_dout_fetch()
3363 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3364 * arr into sip->storep+lba and return true. If comparison fails then
3370 u64 block, rest = 0; in comp_write_worker() local
3373 u8 *fsp = sip->storep; in comp_write_worker()
3375 block = do_div(lba, store_blks); in comp_write_worker()
3376 if (block + num > store_blks) in comp_write_worker()
3377 rest = block + num - store_blks; in comp_write_worker()
3379 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size); in comp_write_worker()
3383 res = memcmp(fsp, arr + ((num - rest) * lb_size), in comp_write_worker()
3390 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size); in comp_write_worker()
3392 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size); in comp_write_worker()
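Per its comment, comp_write_worker() implements the COMPARE AND WRITE semantics: the first num blocks of arr are compared against the store at lba, and only on a match are the second num blocks of arr written back, with the same wrap-around handling as do_device_access(). A hedged sketch of that compare-then-conditionally-write core, omitting the circular-store wrap case for brevity:

/* Hedged sketch of the compare-then-conditionally-write core of
 * COMPARE AND WRITE, without the circular-store wrap handling. */
#include <stdbool.h>
#include <string.h>

static bool compare_and_write(unsigned char *store_at_lba,
			      const unsigned char *arr,
			      unsigned int num, unsigned int lb_size)
{
	size_t half = (size_t)num * lb_size;

	/* first num blocks of arr carry the expected ("verify") data */
	if (memcmp(store_at_lba, arr, half) != 0)
		return false;		/* miscompare: leave the medium untouched */

	/* second num blocks of arr carry the new data to write */
	memcpy(store_at_lba, arr + half, half);
	return true;
}

int main(void)
{
	unsigned char store[1024] = { 0 };	/* 2 x 512-byte blocks, all zero */
	unsigned char arr[2048] = { 0 };	/* verify half zero, write half 0x55 */

	memset(arr + 1024, 0x55, 1024);
	return compare_and_write(store, arr, 2, 512) ? 0 : 1;
}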
3413 if (sdt->guard_tag != csum) { in dif_verify()
3416 be16_to_cpu(sdt->guard_tag), in dif_verify()
3421 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { in dif_verify()
3427 be32_to_cpu(sdt->ref_tag) != ei_lba) { in dif_verify()
3441 scp->device->hostdata, true); in dif_copy_prot()
3442 struct t10_pi_tuple *dif_storep = sip->dif_storep; in dif_copy_prot()
3459 rest = start + len - dif_store_end; in dif_copy_prot()
3464 memcpy(paddr, start, len - rest); in dif_copy_prot()
3466 memcpy(start, paddr, len - rest); in dif_copy_prot()
3470 memcpy(paddr + len - rest, dif_storep, rest); in dif_copy_prot()
3472 memcpy(dif_storep, paddr + len - rest, rest); in dif_copy_prot()
3476 resid -= len; in dif_copy_prot()
3488 scp->device->hostdata, true); in prot_verify_read()
3495 if (sdt->app_tag == cpu_to_be16(0xffff)) in prot_verify_read()
3505 if (scp->cmnd[1] >> 5) { /* RDPROTECT */ in prot_verify_read()
3526 __acquire(&sip->macc_lck); in sdeb_read_lock()
3531 read_lock(&sip->macc_lck); in sdeb_read_lock()
3542 __release(&sip->macc_lck); in sdeb_read_unlock()
3547 read_unlock(&sip->macc_lck); in sdeb_read_unlock()
3558 __acquire(&sip->macc_lck); in sdeb_write_lock()
3563 write_lock(&sip->macc_lck); in sdeb_write_lock()
3574 __release(&sip->macc_lck); in sdeb_write_unlock()
3579 write_unlock(&sip->macc_lck); in sdeb_write_unlock()
3593 u8 *cmd = scp->cmnd; in resp_read_dt0()
3643 sdev_printk(KERN_ERR, scp->device, "Unprotected RD " in resp_read_dt0()
3656 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) && in resp_read_dt0()
3661 if (0x70 == (scp->sense_buffer[0] & 0x7f)) { in resp_read_dt0()
3662 scp->sense_buffer[0] |= 0x80; /* Valid bit */ in resp_read_dt0()
3665 put_unaligned_be32(ret, scp->sense_buffer + 3); in resp_read_dt0()
3681 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { in resp_read_dt0()
3692 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { in resp_read_dt0()
3703 if (unlikely(ret == -1)) in resp_read_dt0()
3706 scsi_set_resid(scp, scsi_bufflen(scp) - ret); in resp_read_dt0()
3715 /* Logical block guard check failed */ in resp_read_dt0()
3773 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */ in prot_verify_write()
3803 lba += sdebug_unmap_granularity - sdebug_unmap_alignment; in lba_to_map_index()
3813 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; in map_index_to_lba()
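lba_to_map_index() and map_index_to_lba() above convert between LBAs and bits in the provisioning bitmap; when the unmap alignment is non-zero, the LBA is shifted by (granularity - alignment) before or after dividing by the granularity so that bit boundaries fall on aligned block boundaries. A stand-alone sketch of the same arithmetic with a worked example follows; the helper bodies are reconstructed from the two adjustment lines shown, so treat the exact guard conditions and the example granularity/alignment values as assumptions.

/* Hedged sketch of the LBA <-> provisioning-bitmap-index conversion,
 * with granularity/alignment values chosen only for illustration. */
#include <stdio.h>

static unsigned long long granularity = 8;  /* blocks per map bit (assumed) */
static unsigned long long alignment   = 4;  /* first aligned boundary (assumed) */

static unsigned long long lba_to_map_index(unsigned long long lba)
{
	if (alignment)
		lba += granularity - alignment;
	return lba / granularity;
}

static unsigned long long map_index_to_lba(unsigned long long index)
{
	unsigned long long lba = index * granularity;

	if (alignment)
		lba -= granularity - alignment;
	return lba;
}

int main(void)
{
	/* LBAs 0..3 map to bit 0, 4..11 to bit 1, 12..19 to bit 2, ... */
	for (unsigned long long lba = 0; lba < 16; lba += 4)
		printf("lba %llu -> map bit %llu\n", lba, lba_to_map_index(lba));
	printf("map bit 1 covers LBAs starting at %llu\n", map_index_to_lba(1));
	return 0;
}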
3826 mapped = test_bit(index, sip->map_storep); in map_state()
3829 next = find_next_zero_bit(sip->map_storep, map_size, index); in map_state()
3831 next = find_next_bit(sip->map_storep, map_size, index); in map_state()
3834 *num = end - lba; in map_state()
3847 set_bit(index, sip->map_storep); in map_region()
3857 u8 *fsp = sip->storep; in unmap_region()
3865 clear_bit(index, sip->map_storep); in unmap_region()
3872 if (sip->dif_storep) { in unmap_region()
3873 memset(sip->dif_storep + lba, 0xff, in unmap_region()
3874 sizeof(*sip->dif_storep) * in unmap_region()
3890 u8 *cmd = scp->cmnd; in resp_write_dt0()
3940 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " in resp_write_dt0()
3955 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { in resp_write_dt0()
3959 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ in resp_write_dt0()
3966 if (scp->prot_flags & SCSI_PROT_REF_CHECK) { in resp_write_dt0()
3970 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ in resp_write_dt0()
3986 if (unlikely(-1 == ret)) in resp_write_dt0()
3990 sdev_printk(KERN_INFO, scp->device, in resp_write_dt0()
4001 /* Logical block guard check failed */ in resp_write_dt0()
4021 u8 *cmd = scp->cmnd; in resp_write_scat()
4056 sdev_printk(KERN_ERR, scp->device, in resp_write_scat()
4061 return 0; /* T10 says these do-nothings are not errors */ in resp_write_scat()
4064 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4073 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4083 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4087 if (res == -1) { in resp_write_scat()
4100 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4113 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4141 if (unlikely(-1 == ret)) { in resp_write_scat()
4145 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4157 /* Logical block guard check failed */ in resp_write_scat()
4183 struct scsi_device *sdp = scp->device; in resp_write_same()
4184 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in resp_write_same()
4186 u64 block, lbaa; in resp_write_same() local
4190 scp->device->hostdata, true); in resp_write_same()
4207 block = do_div(lbaa, sdebug_store_sectors); in resp_write_same()
4208 /* if ndob then zero 1 logical block, else fetch 1 logical block */ in resp_write_same()
4209 fsp = sip->storep; in resp_write_same()
4210 fs1p = fsp + (block * lb_size); in resp_write_same()
4217 if (-1 == ret) { in resp_write_same()
4221 sdev_printk(KERN_INFO, scp->device, in resp_write_same()
4228 block = do_div(lbaa, sdebug_store_sectors); in resp_write_same()
4229 memmove(fsp + (block * lb_size), fs1p, lb_size); in resp_write_same()
4245 u8 *cmd = scp->cmnd; in resp_write_same_10()
4261 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); in resp_write_same_10()
4270 u8 *cmd = scp->cmnd; in resp_write_same_16()
4284 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */ in resp_write_same_16()
4289 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); in resp_write_same_16()
4296 * field. For the Report supported operation codes command, SPC-4 suggests
4301 u8 *cmd = scp->cmnd; in resp_write_buffer()
4302 struct scsi_device *sdp = scp->device; in resp_write_buffer()
4310 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in resp_write_buffer()
4311 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm); in resp_write_buffer()
4314 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm); in resp_write_buffer()
4319 &devip->sdbg_host->dev_info_list, in resp_write_buffer()
4321 if (dp->target == sdp->id) { in resp_write_buffer()
4322 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm); in resp_write_buffer()
4325 dp->uas_bm); in resp_write_buffer()
4331 &devip->sdbg_host->dev_info_list, in resp_write_buffer()
4333 if (dp->target == sdp->id) in resp_write_buffer()
4335 dp->uas_bm); in resp_write_buffer()
4347 u8 *cmd = scp->cmnd; in resp_comp_write()
4369 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " in resp_comp_write()
4385 if (ret == -1) { in resp_comp_write()
4389 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " in resp_comp_write()
4421 payload_len = get_unaligned_be16(scp->cmnd + 7); in resp_unmap()
4424 descriptors = (payload_len - 8) / 16; in resp_unmap()
4426 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); in resp_unmap()
4439 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); in resp_unmap()
4471 u8 *cmd = scp->cmnd; in resp_get_lba_status()
4495 if (sdebug_capacity - lba <= 0xffffffff) in resp_get_lba_status()
4496 num = sdebug_capacity - lba; in resp_get_lba_status()
4516 u8 *cmd = scp->cmnd; in resp_sync_cache()
4537 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4548 u64 block, rest = 0; in resp_pre_fetch() local
4550 u8 *cmd = scp->cmnd; in resp_pre_fetch()
4552 u8 *fsp = sip->storep; in resp_pre_fetch()
4557 } else { /* PRE-FETCH(16) */ in resp_pre_fetch()
4567 /* PRE-FETCH spec says nothing about LBP or PI so skip them */ in resp_pre_fetch()
4568 block = do_div(lba, sdebug_store_sectors); in resp_pre_fetch()
4569 if (block + nblks > sdebug_store_sectors) in resp_pre_fetch()
4570 rest = block + nblks - sdebug_store_sectors; in resp_pre_fetch()
4572 /* Try to bring the PRE-FETCH range into CPU's cache */ in resp_pre_fetch()
4574 prefetch_range(fsp + (sdebug_sector_size * block), in resp_pre_fetch()
4575 (nblks - rest) * sdebug_sector_size); in resp_pre_fetch()
4588 * (W-LUN), the normal Linux scanning logic does not associate it with a
4590 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4592 * the above will associate a W-LUN to each target. To only get a W-LUN
4593 * for target 2, then use "echo '- 2 49409' > scan" .
4598 unsigned char *cmd = scp->cmnd; in resp_report_luns()
4605 unsigned int wlun_cnt; /* report luns W-LUN count */ in resp_report_luns()
4619 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_report_luns()
4624 case 0: /* all LUNs apart from W-LUNs */ in resp_report_luns()
4628 case 1: /* only W-LUNs */ in resp_report_luns()
4637 case 0x11: /* see SPC-5 */ in resp_report_luns()
4641 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_report_luns()
4646 --lun_cnt; in resp_report_luns()
4669 lun_p->scsi_lun[0] |= 0x40; in resp_report_luns()
4697 u8 *cmd = scp->cmnd; in resp_verify()
4707 is_bytchk3 = true; /* 1 block sent, compared repeatedly */ in resp_verify()
4740 if (ret == -1) { in resp_verify()
4744 sdev_printk(KERN_INFO, scp->device, in resp_verify()
4776 u8 *cmd = scp->cmnd; in resp_report_zones()
4796 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD); in resp_report_zones()
4809 lba = zsp->z_start + zsp->z_size) { in resp_report_zones()
4819 if (zsp->z_cond != ZC1_EMPTY) in resp_report_zones()
4824 if (zsp->z_cond != ZC2_IMPLICIT_OPEN) in resp_report_zones()
4829 if (zsp->z_cond != ZC3_EXPLICIT_OPEN) in resp_report_zones()
4834 if (zsp->z_cond != ZC4_CLOSED) in resp_report_zones()
4839 if (zsp->z_cond != ZC5_FULL) in resp_report_zones()
4846 * Read-only, offline, reset WP recommended are in resp_report_zones()
4851 /* non-seq-resource set */ in resp_report_zones()
4852 if (!zsp->z_non_seq_resource) in resp_report_zones()
4874 desc[0] = zsp->z_type; in resp_report_zones()
4875 desc[1] = zsp->z_cond << 4; in resp_report_zones()
4876 if (zsp->z_non_seq_resource) in resp_report_zones()
4878 put_unaligned_be64((u64)zsp->z_size, desc + 8); in resp_report_zones()
4879 put_unaligned_be64((u64)zsp->z_start, desc + 16); in resp_report_zones()
4880 put_unaligned_be64((u64)zsp->z_wp, desc + 24); in resp_report_zones()
4894 put_unaligned_be64(sdebug_capacity - 1, arr + 8); in resp_report_zones()
4896 if (devip->zcap < devip->zsize) in resp_report_zones()
4897 put_unaligned_be64(devip->zsize, arr + 16); in resp_report_zones()
4899 rep_len = (unsigned long)desc - (unsigned long)arr; in resp_report_zones()
4908 /* Logic transplanted from tcmu-runner, file_zbc.c */
4911 struct sdeb_zone_state *zsp = &devip->zstate[0]; in zbc_open_all()
4914 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_open_all()
4915 if (zsp->z_cond == ZC4_CLOSED) in zbc_open_all()
4916 zbc_open_zone(devip, &devip->zstate[i], true); in zbc_open_all()
4925 u8 *cmd = scp->cmnd; in resp_open_zone()
4939 if (devip->max_open && in resp_open_zone()
4940 devip->nr_exp_open + devip->nr_closed > devip->max_open) { in resp_open_zone()
4960 if (z_id != zsp->z_start) { in resp_open_zone()
4971 zc = zsp->z_cond; in resp_open_zone()
4975 if (devip->max_open && devip->nr_exp_open >= devip->max_open) { in resp_open_zone()
4992 for (i = 0; i < devip->nr_zones; i++) in zbc_close_all()
4993 zbc_close_zone(devip, &devip->zstate[i]); in zbc_close_all()
5001 u8 *cmd = scp->cmnd; in resp_close_zone()
5027 if (z_id != zsp->z_start) { in resp_close_zone()
5047 enum sdebug_z_cond zc = zsp->z_cond; in zbc_finish_zone()
5053 if (zsp->z_cond == ZC4_CLOSED) in zbc_finish_zone()
5054 devip->nr_closed--; in zbc_finish_zone()
5055 zsp->z_wp = zsp->z_start + zsp->z_size; in zbc_finish_zone()
5056 zsp->z_cond = ZC5_FULL; in zbc_finish_zone()
5064 for (i = 0; i < devip->nr_zones; i++) in zbc_finish_all()
5065 zbc_finish_zone(devip, &devip->zstate[i], false); in zbc_finish_all()
5074 u8 *cmd = scp->cmnd; in resp_finish_zone()
5099 if (z_id != zsp->z_start) { in resp_finish_zone()
5125 zc = zsp->z_cond; in zbc_rwp_zone()
5129 if (zsp->z_cond == ZC4_CLOSED) in zbc_rwp_zone()
5130 devip->nr_closed--; in zbc_rwp_zone()
5132 if (zsp->z_wp > zsp->z_start) in zbc_rwp_zone()
5133 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, in zbc_rwp_zone()
5134 (zsp->z_wp - zsp->z_start) * sdebug_sector_size); in zbc_rwp_zone()
5136 zsp->z_non_seq_resource = false; in zbc_rwp_zone()
5137 zsp->z_wp = zsp->z_start; in zbc_rwp_zone()
5138 zsp->z_cond = ZC1_EMPTY; in zbc_rwp_zone()
5145 for (i = 0; i < devip->nr_zones; i++) in zbc_rwp_all()
5146 zbc_rwp_zone(devip, &devip->zstate[i]); in zbc_rwp_all()
5154 u8 *cmd = scp->cmnd; in resp_rwp_zone()
5178 if (z_id != zsp->z_start) { in resp_rwp_zone()
5205 struct scsi_cmnd *scp = sqcp->scmd; in sdebug_q_cmd_complete()
5211 if (raw_smp_processor_id() != sd_dp->issuing_cpu) in sdebug_q_cmd_complete()
5221 spin_lock_irqsave(&sdsc->lock, flags); in sdebug_q_cmd_complete()
5222 aborted = sd_dp->aborted; in sdebug_q_cmd_complete()
5224 sd_dp->aborted = false; in sdebug_q_cmd_complete()
5227 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_q_cmd_complete()
5230 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n"); in sdebug_q_cmd_complete()
5275 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M) in sdebug_device_create_zones()
5277 while (capacity < devip->zsize << 2 && devip->zsize >= 2) in sdebug_device_create_zones()
5278 devip->zsize >>= 1; in sdebug_device_create_zones()
5279 if (devip->zsize < 2) { in sdebug_device_create_zones()
5281 return -EINVAL; in sdebug_device_create_zones()
5286 return -EINVAL; in sdebug_device_create_zones()
5288 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M) in sdebug_device_create_zones()
5290 if (devip->zsize >= capacity) { in sdebug_device_create_zones()
5292 return -EINVAL; in sdebug_device_create_zones()
5296 devip->zsize_shift = ilog2(devip->zsize); in sdebug_device_create_zones()
5297 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift; in sdebug_device_create_zones()
5300 devip->zcap = devip->zsize; in sdebug_device_create_zones()
5302 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >> in sdebug_device_create_zones()
5304 if (devip->zcap > devip->zsize) { in sdebug_device_create_zones()
5306 return -EINVAL; in sdebug_device_create_zones()
5310 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift; in sdebug_device_create_zones()
5313 return -EINVAL; in sdebug_device_create_zones()
5315 devip->nr_conv_zones = sdeb_zbc_nr_conv; in sdebug_device_create_zones()
5316 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >> in sdebug_device_create_zones()
5317 devip->zsize_shift; in sdebug_device_create_zones()
5318 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones; in sdebug_device_create_zones()
5321 if (devip->zcap < devip->zsize) in sdebug_device_create_zones()
5322 devip->nr_zones += devip->nr_seq_zones; in sdebug_device_create_zones()
5324 if (devip->zoned) { in sdebug_device_create_zones()
5326 if (sdeb_zbc_max_open >= devip->nr_zones - 1) in sdebug_device_create_zones()
5327 devip->max_open = (devip->nr_zones - 1) / 2; in sdebug_device_create_zones()
5329 devip->max_open = sdeb_zbc_max_open; in sdebug_device_create_zones()
5332 devip->zstate = kcalloc(devip->nr_zones, in sdebug_device_create_zones()
5334 if (!devip->zstate) in sdebug_device_create_zones()
5335 return -ENOMEM; in sdebug_device_create_zones()
5337 for (i = 0; i < devip->nr_zones; i++) { in sdebug_device_create_zones()
5338 zsp = &devip->zstate[i]; in sdebug_device_create_zones()
5340 zsp->z_start = zstart; in sdebug_device_create_zones()
5342 if (i < devip->nr_conv_zones) { in sdebug_device_create_zones()
5343 zsp->z_type = ZBC_ZTYPE_CNV; in sdebug_device_create_zones()
5344 zsp->z_cond = ZBC_NOT_WRITE_POINTER; in sdebug_device_create_zones()
5345 zsp->z_wp = (sector_t)-1; in sdebug_device_create_zones()
5346 zsp->z_size = in sdebug_device_create_zones()
5347 min_t(u64, devip->zsize, capacity - zstart); in sdebug_device_create_zones()
5348 } else if ((zstart & (devip->zsize - 1)) == 0) { in sdebug_device_create_zones()
5349 if (devip->zoned) in sdebug_device_create_zones()
5350 zsp->z_type = ZBC_ZTYPE_SWR; in sdebug_device_create_zones()
5352 zsp->z_type = ZBC_ZTYPE_SWP; in sdebug_device_create_zones()
5353 zsp->z_cond = ZC1_EMPTY; in sdebug_device_create_zones()
5354 zsp->z_wp = zsp->z_start; in sdebug_device_create_zones()
5355 zsp->z_size = in sdebug_device_create_zones()
5356 min_t(u64, devip->zcap, capacity - zstart); in sdebug_device_create_zones()
5358 zsp->z_type = ZBC_ZTYPE_GAP; in sdebug_device_create_zones()
5359 zsp->z_cond = ZBC_NOT_WRITE_POINTER; in sdebug_device_create_zones()
5360 zsp->z_wp = (sector_t)-1; in sdebug_device_create_zones()
5361 zsp->z_size = min_t(u64, devip->zsize - devip->zcap, in sdebug_device_create_zones()
5362 capacity - zstart); in sdebug_device_create_zones()
5365 WARN_ON_ONCE((int)zsp->z_size <= 0); in sdebug_device_create_zones()
5366 zstart += zsp->z_size; in sdebug_device_create_zones()
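sdebug_device_create_zones() above derives the zone layout from the device capacity: the total zone count is capacity rounded up to whole zones, split into conventional and sequential zones, and when the configured zone capacity is smaller than the zone size each sequential zone is followed by a gap zone, doubling the sequential count. A stand-alone sketch of that arithmetic with made-up example numbers (the kernel uses ilog2()/shifts and ALIGN(); division and a local align_up() stand in for them here):

/* Hedged sketch of the zone-count arithmetic used above, with
 * illustrative example numbers (all sizes in logical blocks). */
#include <stdio.h>

static unsigned long long align_up(unsigned long long v, unsigned long long a)
{
	return (v + a - 1) / a * a;	/* stand-in for the kernel's ALIGN() */
}

int main(void)
{
	unsigned long long capacity = 1ULL << 20;	/* 1 Mi blocks (assumed) */
	unsigned long long zsize = 1ULL << 16;		/* 64 Ki blocks per zone (assumed) */
	unsigned long long zcap = 1ULL << 15;		/* zone capacity < zone size */
	unsigned int nr_conv = 2;			/* conventional zones (assumed) */

	unsigned long long conv_capacity = (unsigned long long)nr_conv * zsize;
	unsigned long long nr_seq =
		align_up(capacity - conv_capacity, zsize) / zsize;
	unsigned long long nr_zones = nr_conv + nr_seq;

	/* zcap < zsize: each sequential zone is followed by a gap zone,
	 * so the sequential-zone count is added a second time. */
	if (zcap < zsize)
		nr_zones += nr_seq;

	printf("conv=%u seq=%llu total=%llu\n", nr_conv, nr_seq, nr_zones);
	return 0;
}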
5380 uuid_gen(&devip->lu_name); in sdebug_device_create()
5383 devip->lu_name = shared_uuid; in sdebug_device_create()
5387 devip->lu_name = shared_uuid; in sdebug_device_create()
5390 devip->sdbg_host = sdbg_host; in sdebug_device_create()
5392 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM; in sdebug_device_create()
5398 devip->zoned = false; in sdebug_device_create()
5400 devip->create_ts = ktime_get_boottime(); in sdebug_device_create()
5401 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0)); in sdebug_device_create()
5402 spin_lock_init(&devip->list_lock); in sdebug_device_create()
5403 INIT_LIST_HEAD(&devip->inject_err_list); in sdebug_device_create()
5404 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); in sdebug_device_create()
5415 sdbg_host = shost_to_sdebug_host(sdev->host); in find_build_dev_info()
5417 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in find_build_dev_info()
5418 if ((devip->used) && (devip->channel == sdev->channel) && in find_build_dev_info()
5419 (devip->target == sdev->id) && in find_build_dev_info()
5420 (devip->lun == sdev->lun)) in find_build_dev_info()
5423 if ((!devip->used) && (!open_devip)) in find_build_dev_info()
5435 open_devip->channel = sdev->channel; in find_build_dev_info()
5436 open_devip->target = sdev->id; in find_build_dev_info()
5437 open_devip->lun = sdev->lun; in find_build_dev_info()
5438 open_devip->sdbg_host = sdbg_host; in find_build_dev_info()
5439 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm); in find_build_dev_info()
5440 open_devip->used = true; in find_build_dev_info()
5448 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_slave_alloc()
5456 (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_slave_configure()
5461 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_slave_configure()
5462 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) in scsi_debug_slave_configure()
5463 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; in scsi_debug_slave_configure()
5469 sdp->hostdata = devip; in scsi_debug_slave_configure()
5471 sdp->no_uld_attach = 1; in scsi_debug_slave_configure()
5475 sdp->allow_restart = 1; in scsi_debug_slave_configure()
5477 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev), in scsi_debug_slave_configure()
5479 if (IS_ERR_OR_NULL(devip->debugfs_entry)) in scsi_debug_slave_configure()
5481 __func__, dev_name(&sdp->sdev_gendev)); in scsi_debug_slave_configure()
5483 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp, in scsi_debug_slave_configure()
5487 __func__, dev_name(&sdp->sdev_gendev)); in scsi_debug_slave_configure()
5495 (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_slave_destroy()
5500 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_slave_destroy()
5505 spin_lock(&devip->list_lock); in scsi_debug_slave_destroy()
5506 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in scsi_debug_slave_destroy()
5507 list_del_rcu(&err->list); in scsi_debug_slave_destroy()
5508 call_rcu(&err->rcu, sdebug_err_free); in scsi_debug_slave_destroy()
5510 spin_unlock(&devip->list_lock); in scsi_debug_slave_destroy()
5512 debugfs_remove(devip->debugfs_entry); in scsi_debug_slave_destroy()
5514 /* make this slot available for re-use */ in scsi_debug_slave_destroy()
5515 devip->used = false; in scsi_debug_slave_destroy()
5516 sdp->hostdata = NULL; in scsi_debug_slave_destroy()
5524 int res = hrtimer_try_to_cancel(&sd_dp->hrt); in stop_qc_helper()
5528 case -1: /* -1 It's executing the CB */ in stop_qc_helper()
5536 if (cancel_work_sync(&sd_dp->ew.work)) in stop_qc_helper()
5555 lockdep_assert_held(&sdsc->lock); in scsi_debug_stop_cmnd()
5559 sd_dp = &sqcp->sd_dp; in scsi_debug_stop_cmnd()
5560 l_defer_t = READ_ONCE(sd_dp->defer_t); in scsi_debug_stop_cmnd()
5570 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5578 spin_lock_irqsave(&sdsc->lock, flags); in scsi_debug_abort_cmnd()
5580 spin_unlock_irqrestore(&sdsc->lock, flags); in scsi_debug_abort_cmnd()
5603 struct Scsi_Host *shost = sdhp->shost; in stop_all_queued()
5605 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL); in stop_all_queued()
5612 struct scsi_device *sdp = cmnd->device; in sdebug_fail_abort()
5613 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_abort()
5615 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_abort()
5622 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_abort()
5623 if (err->type == ERR_ABORT_CMD_FAILED && in sdebug_fail_abort()
5624 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_abort()
5625 ret = !!err->cnt; in sdebug_fail_abort()
5626 if (err->cnt < 0) in sdebug_fail_abort()
5627 err->cnt++; in sdebug_fail_abort()
5641 u8 *cmd = SCpnt->cmnd; in scsi_debug_abort()
5647 sdev_printk(KERN_INFO, SCpnt->device, in scsi_debug_abort()
5665 if (scmd->device == sdp) in scsi_debug_stop_all_queued_iter()
5674 struct Scsi_Host *shost = sdp->host; in scsi_debug_stop_all_queued()
5676 blk_mq_tagset_busy_iter(&shost->tag_set, in scsi_debug_stop_all_queued()
5682 struct scsi_device *sdp = cmnd->device; in sdebug_fail_lun_reset()
5683 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_lun_reset()
5685 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_lun_reset()
5692 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_lun_reset()
5693 if (err->type == ERR_LUN_RESET_FAILED && in sdebug_fail_lun_reset()
5694 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_lun_reset()
5695 ret = !!err->cnt; in sdebug_fail_lun_reset()
5696 if (err->cnt < 0) in sdebug_fail_lun_reset()
5697 err->cnt++; in sdebug_fail_lun_reset()
5710 struct scsi_device *sdp = SCpnt->device; in scsi_debug_device_reset()
5711 struct sdebug_dev_info *devip = sdp->hostdata; in scsi_debug_device_reset()
5712 u8 *cmd = SCpnt->cmnd; in scsi_debug_device_reset()
5722 set_bit(SDEBUG_UA_POR, devip->uas_bm); in scsi_debug_device_reset()
5734 struct scsi_target *starget = scsi_target(cmnd->device); in sdebug_fail_target_reset()
5736 (struct sdebug_target_info *)starget->hostdata; in sdebug_fail_target_reset()
5739 return targetip->reset_fail; in sdebug_fail_target_reset()
5746 struct scsi_device *sdp = SCpnt->device; in scsi_debug_target_reset()
5747 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); in scsi_debug_target_reset()
5749 u8 *cmd = SCpnt->cmnd; in scsi_debug_target_reset()
5757 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in scsi_debug_target_reset()
5758 if (devip->target == sdp->id) { in scsi_debug_target_reset()
5759 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_target_reset()
5779 struct scsi_device *sdp = SCpnt->device; in scsi_debug_bus_reset()
5780 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); in scsi_debug_bus_reset()
5789 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in scsi_debug_bus_reset()
5790 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_bus_reset()
5808 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); in scsi_debug_host_reset()
5811 list_for_each_entry(devip, &sdbg_host->dev_info_list, in scsi_debug_host_reset()
5813 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_host_reset()
5820 sdev_printk(KERN_INFO, SCpnt->device, in scsi_debug_host_reset()
5840 sectors_per_part = (num_sectors - sdebug_sectors_per) in sdebug_build_parts()
5848 if (starts[k] - starts[k - 1] < max_part_secs) in sdebug_build_parts()
5849 max_part_secs = starts[k] - starts[k - 1]; in sdebug_build_parts()
5859 end_sec = starts[k] + max_part_secs - 1; in sdebug_build_parts()
5860 pp->boot_ind = 0; in sdebug_build_parts()
5862 pp->cyl = start_sec / heads_by_sects; in sdebug_build_parts()
5863 pp->head = (start_sec - (pp->cyl * heads_by_sects)) in sdebug_build_parts()
5865 pp->sector = (start_sec % sdebug_sectors_per) + 1; in sdebug_build_parts()
5867 pp->end_cyl = end_sec / heads_by_sects; in sdebug_build_parts()
5868 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects)) in sdebug_build_parts()
5870 pp->end_sector = (end_sec % sdebug_sectors_per) + 1; in sdebug_build_parts()
5872 pp->start_sect = cpu_to_le32(start_sec); in sdebug_build_parts()
5873 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1); in sdebug_build_parts()
5874 pp->sys_ind = 0x83; /* plain Linux partition */ in sdebug_build_parts()
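sdebug_build_parts() above converts each partition's start and end sectors into the legacy cylinder/head/sector fields of a DOS partition entry. A stand-alone sketch of that LBA-to-CHS arithmetic follows; heads_by_sects is assumed to be heads * sectors_per_track (that product is not spelled out in this excerpt), and the geometry values are illustrative.

/* Hedged sketch of the LBA -> cylinder/head/sector conversion used when
 * building the fake DOS partition table; geometry values are illustrative. */
#include <stdio.h>

struct chs { unsigned int cyl, head, sector; };

static struct chs lba_to_chs(unsigned long long lba,
			     unsigned int heads, unsigned int sectors_per)
{
	unsigned long long heads_by_sects = (unsigned long long)heads * sectors_per;
	struct chs c;

	c.cyl = lba / heads_by_sects;
	c.head = (lba - c.cyl * heads_by_sects) / sectors_per;
	c.sector = (lba % sectors_per) + 1;	/* CHS sector numbers are 1-based */
	return c;
}

int main(void)
{
	struct chs c = lba_to_chs(5000, 8, 32);	/* 8 heads, 32 sectors/track */

	printf("lba 5000 -> C/H/S %u/%u/%u\n", c.cyl, c.head, c.sector);
	return 0;
}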
5878 static void block_unblock_all_queues(bool block) in block_unblock_all_queues() argument
5885 struct Scsi_Host *shost = sdhp->shost; in block_unblock_all_queues()
5887 if (block) in block_unblock_all_queues()
5894 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5946 sd_dp = &sqcp->sd_dp; in sdebug_alloc_queued_cmd()
5948 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); in sdebug_alloc_queued_cmd()
5949 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; in sdebug_alloc_queued_cmd()
5950 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); in sdebug_alloc_queued_cmd()
5952 sqcp->scmd = scmd; in sdebug_alloc_queued_cmd()
5969 bool polled = rq->cmd_flags & REQ_POLLED; in schedule_resp()
5982 sdp = cmnd->device; in schedule_resp()
5991 int qdepth = cmnd->device->queue_depth; in schedule_resp()
6010 sd_dp = &sqcp->sd_dp; in schedule_resp()
6016 cmnd->result = pfp ? pfp(cmnd, devip) : 0; in schedule_resp()
6017 if (cmnd->result & SDEG_RES_IMMED_MASK) { in schedule_resp()
6018 cmnd->result &= ~SDEG_RES_IMMED_MASK; in schedule_resp()
6021 if (cmnd->result == 0 && scsi_result != 0) in schedule_resp()
6022 cmnd->result = scsi_result; in schedule_resp()
6023 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) { in schedule_resp()
6027 cmnd->result = check_condition_result; in schedule_resp()
6031 if (unlikely(sdebug_verbose && cmnd->result)) in schedule_resp()
6032 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", in schedule_resp()
6033 __func__, cmnd->result); in schedule_resp()
6054 u64 d = ktime_get_boottime_ns() - ns_from_boot; in schedule_resp()
6063 kt -= d; in schedule_resp()
6067 sd_dp->issuing_cpu = raw_smp_processor_id(); in schedule_resp()
6069 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6070 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); in schedule_resp()
6072 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); in schedule_resp()
6073 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6076 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6078 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT); in schedule_resp()
6079 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); in schedule_resp()
6081 * The completion handler will try to grab sqcp->lock, in schedule_resp()
6086 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6091 sd_dp->aborted = true; in schedule_resp()
6098 sd_dp->issuing_cpu = raw_smp_processor_id(); in schedule_resp()
6100 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6102 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); in schedule_resp()
6103 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); in schedule_resp()
6104 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6106 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6108 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ); in schedule_resp()
6109 schedule_work(&sd_dp->ew.work); in schedule_resp()
6110 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6116 respond_in_thread: /* call back to mid-layer using invocation thread */ in schedule_resp()
6117 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0; in schedule_resp()
6118 cmnd->result &= ~SDEG_RES_IMMED_MASK; in schedule_resp()
6119 if (cmnd->result == 0 && scsi_result != 0) in schedule_resp()
6120 cmnd->result = scsi_result; in schedule_resp()
6129 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6212 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6214 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6216 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6233 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
6238 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6239 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6246 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6248 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6249 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6253 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6254 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6257 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6264 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6265 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6266 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6269 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6285 if (k >= (SDEBUG_INFO_LEN - 1)) in scsi_debug_info()
6287 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, in scsi_debug_info()
6303 return -EACCES; in scsi_debug_write_info()
6307 return -EINVAL; in scsi_debug_write_info()
6328 int queue_num = data->queue_num; in sdebug_submit_queue_iter()
6334 if (*data->first == -1) in sdebug_submit_queue_iter()
6335 *data->first = *data->last = tag; in sdebug_submit_queue_iter()
6337 *data->last = tag; in sdebug_submit_queue_iter()
6361 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n", in scsi_debug_show_info()
6377 int f = -1, l = -1; in scsi_debug_show_info()
6384 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter, in scsi_debug_show_info()
6392 seq_printf(m, "this host_no=%d\n", host->host_no); in scsi_debug_show_info()
6402 idx = sdhp->si_idx; in scsi_debug_show_info()
6404 sdhp->shost->host_no, idx); in scsi_debug_show_info()
6426 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6443 struct Scsi_Host *shost = sdhp->shost; in delay_store()
6446 res = -EBUSY; /* queued commands */ in delay_store()
6459 return -EINVAL; in delay_store()
6467 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6484 struct Scsi_Host *shost = sdhp->shost; in ndelay_store()
6487 res = -EBUSY; /* queued commands */ in ndelay_store()
6502 return -EINVAL; in ndelay_store()
6526 return -EINVAL; in opts_store()
6547 return -EINVAL; in ptype_store()
6551 return -EINVAL; in ptype_store()
6555 return -EINVAL; in ptype_store()
6572 return -EINVAL; in dsense_store()
6594 if (want_store) { /* 1 --> 0 transition, set up store */ in fake_rw_store()
6607 if (sdhp->si_idx != idx) { in fake_rw_store()
6608 xa_set_mark(per_store_ap, sdhp->si_idx, in fake_rw_store()
6610 sdhp->si_idx = idx; in fake_rw_store()
6614 } else { /* 0 --> 1 transition is trigger for shrink */ in fake_rw_store()
6620 return -EINVAL; in fake_rw_store()
6637 return -EINVAL; in no_lun_0_store()
6655 return -EINVAL; in num_tgts_store()
6676 return -EINVAL; in per_host_store_store()
6708 return -EINVAL; in every_nth_store()
6732 return -EINVAL; in lun_format_store()
6736 return -EINVAL; in lun_format_store()
6740 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */ in lun_format_store()
6746 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { in lun_format_store()
6747 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); in lun_format_store()
6754 return -EINVAL; in lun_format_store()
6771 return -EINVAL; in max_luns_store()
6776 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ in max_luns_store()
6783 list_for_each_entry(dp, &sdhp->dev_info_list, in max_luns_store()
6786 dp->uas_bm); in max_luns_store()
6793 return -EINVAL; in max_luns_store()
6817 count = -EBUSY; in max_queue_store()
6821 return -EINVAL; in max_queue_store()
6840 return -EINVAL; in no_rwlock_store()
6877 return -ENOTSUPP; in virtual_gb_store()
6890 list_for_each_entry(dp, &sdhp->dev_info_list, in virtual_gb_store()
6893 dp->uas_bm); in virtual_gb_store()
6900 return -EINVAL; in virtual_gb_store()
6920 return -EINVAL; in add_host_store()
6931 if (found) /* re-use case */ in add_host_store()
6938 } while (--delta_hosts); in add_host_store()
6961 return -EINVAL; in vpd_use_hostno_store()
6983 return -EINVAL; in statistics_store()
7028 return scnprintf(buf, PAGE_SIZE, "0-%u\n", in map_show()
7035 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", in map_show()
7036 (int)map_size, sip->map_storep); in map_show()
7056 return -EINVAL; in random_store()
7076 return -EINVAL; in removable_store()
7094 return -EINVAL; in host_lock_store()
7111 return -EINVAL; in strict_store()
7141 [BLK_ZONED_HA] = "host-aware",
7142 [BLK_ZONED_HM] = "host-managed",
7166 return -EINVAL; in sdeb_zbc_model_str()
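The two array entries and the -EINVAL return above belong to the helper that maps the zbc parameter string onto a zoned-model value. A reduced sketch of that mapping using sysfs_match_string() is shown below; the array and helper names are invented, and the real helper also accepts numeric spellings and the optional "host-" prefix noted in the parameter description.

static const char * const zbc_model_sketch_strs[] = {
	[BLK_ZONED_NONE]	= "none",
	[BLK_ZONED_HA]		= "host-aware",
	[BLK_ZONED_HM]		= "host-managed",
};

/* Invented name; reduced version of the model-string parser. */
static int sdeb_zbc_model_sketch(const char *cp)
{
	int res = sysfs_match_string(zbc_model_sketch_strs, cp);

	return res < 0 ? -EINVAL : res;
}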
7186 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7242 int idx = -1; in scsi_debug_init()
7258 return -EINVAL; in scsi_debug_init()
7272 return -EINVAL; in scsi_debug_init()
7277 return -EINVAL; in scsi_debug_init()
7282 return -EINVAL; in scsi_debug_init()
7287 return -EINVAL; in scsi_debug_init()
7292 return -EINVAL; in scsi_debug_init()
7311 return -EINVAL; in scsi_debug_init()
7316 return -EINVAL; in scsi_debug_init()
7321 return -EINVAL; in scsi_debug_init()
7328 return -EINVAL; in scsi_debug_init()
7339 * check for host managed zoned block device specified with in scsi_debug_init()
7359 return -EINVAL; in scsi_debug_init()
7406 return -EINVAL; in scsi_debug_init()
7438 ret = -ENOMEM; in scsi_debug_init()
7451 k, -ret); in scsi_debug_init()
7458 pr_err("add_host k=%d error=%d\n", k, -ret); in scsi_debug_init()
7483 for (; k; k--) in scsi_debug_exit()
7518 vfree(sip->map_storep); in sdebug_erase_store()
7519 vfree(sip->dif_storep); in sdebug_erase_store()
7520 vfree(sip->storep); in sdebug_erase_store()
7556 return -ENOMEM; in sdebug_add_store()
7563 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res); in sdebug_add_store()
7571 res = -ENOMEM; in sdebug_add_store()
7572 sip->storep = vzalloc(sz); in sdebug_add_store()
7573 if (!sip->storep) { in sdebug_add_store()
7578 sdebug_build_parts(sip->storep, sz); in sdebug_add_store()
7585 sip->dif_storep = vmalloc(dif_size); in sdebug_add_store()
7588 sip->dif_storep); in sdebug_add_store()
7590 if (!sip->dif_storep) { in sdebug_add_store()
7594 memset(sip->dif_storep, 0xff, dif_size); in sdebug_add_store()
7596 /* Logical Block Provisioning */ in sdebug_add_store()
7598 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; in sdebug_add_store()
7599 sip->map_storep = vmalloc(array_size(sizeof(long), in sdebug_add_store()
7604 if (!sip->map_storep) { in sdebug_add_store()
7609 bitmap_zero(sip->map_storep, map_size); in sdebug_add_store()
7616 rwlock_init(&sip->macc_lck); in sdebug_add_store()
7620 pr_warn("%s: failed, errno=%d\n", __func__, -res); in sdebug_add_store()
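The sdebug_add_store() fragments show three optional backing areas per store: the data area (storep), the protection-information area (dif_storep, primed with 0xff), and the logical-block-provisioning bitmap (map_storep), all guarded by the macc_lck rwlock. A stripped-down sketch of that allocation pattern follows; the struct layout is abbreviated, error unwinding is omitted, and both names ending in _sketch are invented.

struct sdeb_store_sketch {
	rwlock_t macc_lck;			/* media access lock */
	unsigned char *storep;			/* backing "media" */
	struct t10_pi_tuple *dif_storep;	/* protection information */
	unsigned long *map_storep;		/* provisioning bitmap */
};

static int sdeb_store_alloc_sketch(struct sdeb_store_sketch *sip,
				   unsigned long sz, unsigned long dif_size,
				   unsigned long map_size)
{
	sip->storep = vzalloc(sz);
	if (!sip->storep)
		return -ENOMEM;

	sip->dif_storep = vmalloc(dif_size);
	if (sip->dif_storep)
		memset(sip->dif_storep, 0xff, dif_size);	/* "never written" PI */

	sip->map_storep = vmalloc(array_size(sizeof(long),
					     BITS_TO_LONGS(map_size)));
	if (sip->map_storep)
		bitmap_zero(sip->map_storep, map_size);		/* everything unmapped */

	rwlock_init(&sip->macc_lck);
	return 0;
}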
7627 int error = -ENOMEM; in sdebug_add_host_helper()
7633 return -ENOMEM; in sdebug_add_host_helper()
7637 sdbg_host->si_idx = idx; in sdebug_add_host_helper()
7639 INIT_LIST_HEAD(&sdbg_host->dev_info_list); in sdebug_add_host_helper()
7649 list_add_tail(&sdbg_host->host_list, &sdebug_host_list); in sdebug_add_host_helper()
7652 sdbg_host->dev.bus = &pseudo_lld_bus; in sdebug_add_host_helper()
7653 sdbg_host->dev.parent = pseudo_primary; in sdebug_add_host_helper()
7654 sdbg_host->dev.release = &sdebug_release_adapter; in sdebug_add_host_helper()
7655 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); in sdebug_add_host_helper()
7657 error = device_register(&sdbg_host->dev); in sdebug_add_host_helper()
7660 list_del(&sdbg_host->host_list); in sdebug_add_host_helper()
7669 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, in sdebug_add_host_helper()
7671 list_del(&sdbg_devinfo->dev_list); in sdebug_add_host_helper()
7672 kfree(sdbg_devinfo->zstate); in sdebug_add_host_helper()
7675 if (sdbg_host->dev.release) in sdebug_add_host_helper()
7676 put_device(&sdbg_host->dev); in sdebug_add_host_helper()
7679 pr_warn("%s: failed, errno=%d\n", __func__, -error); in sdebug_add_host_helper()
7697 int idx = -1; in sdebug_do_remove_host()
7705 idx = sdbg_host->si_idx; in sdebug_do_remove_host()
7713 if (idx == sdbg_host2->si_idx) { in sdebug_do_remove_host()
7721 --sdeb_most_recent_idx; in sdebug_do_remove_host()
7725 list_del(&sdbg_host->host_list); in sdebug_do_remove_host()
7731 device_unregister(&sdbg_host->dev); in sdebug_do_remove_host()
7732 --sdebug_num_hosts; in sdebug_do_remove_host()
7737 struct sdebug_dev_info *devip = sdev->hostdata; in sdebug_change_qdepth()
7740 return -ENODEV; in sdebug_change_qdepth()
7752 if (qdepth != sdev->queue_depth) in sdebug_change_qdepth()
7761 return sdev->queue_depth; in sdebug_change_qdepth()
7767 if (sdebug_every_nth < -1) in fake_timeout()
7768 sdebug_every_nth = -1; in fake_timeout()
7784 struct scsi_device *sdp = scp->device; in resp_not_ready()
7786 stopped_state = atomic_read(&devip->stopped); in resp_not_ready()
7788 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { in resp_not_ready()
7789 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); in resp_not_ready()
7792 atomic_set(&devip->stopped, 0); in resp_not_ready()
7800 if (scp->cmnd[0] == TEST_UNIT_READY) { in resp_not_ready()
7804 diff_ns = tur_nanosecs_to_ready - diff_ns; in resp_not_ready()
7807 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */ in resp_not_ready()
7809 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, in resp_not_ready()
7825 if (shost->nr_hw_queues == 1) in sdebug_map_queues()
7829 struct blk_mq_queue_map *map = &shost->tag_set.map[i]; in sdebug_map_queues()
7831 map->nr_queues = 0; in sdebug_map_queues()
7834 map->nr_queues = submit_queues - poll_queues; in sdebug_map_queues()
7836 map->nr_queues = poll_queues; in sdebug_map_queues()
7838 if (!map->nr_queues) { in sdebug_map_queues()
7843 map->queue_offset = qoff; in sdebug_map_queues()
7846 qoff += map->nr_queues; in sdebug_map_queues()
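The map_queues fragments split the hardware queues between the default map and, when poll_queues is non-zero, a poll map. A simplified sketch of that split (invented helper name; the real code iterates over all the map types, skips empty maps, and logs the result):

/* Simplified sketch; not the driver's sdebug_map_queues(). */
static void sdeb_map_queues_sketch(struct Scsi_Host *shost,
				   int submit_queues, int poll_queues)
{
	struct blk_mq_queue_map *def = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
	struct blk_mq_queue_map *poll = &shost->tag_set.map[HCTX_TYPE_POLL];

	def->nr_queues = submit_queues - poll_queues;
	def->queue_offset = 0;
	blk_mq_map_queues(def);

	if (poll_queues) {
		poll->nr_queues = poll_queues;
		poll->queue_offset = def->nr_queues;
		blk_mq_map_queues(poll);
	}
}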
7869 int queue_num = data->queue_num; in sdebug_blk_mq_poll_iter()
7877 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) in sdebug_blk_mq_poll_iter()
7882 spin_lock_irqsave(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
7885 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
7889 sd_dp = &sqcp->sd_dp; in sdebug_blk_mq_poll_iter()
7890 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) { in sdebug_blk_mq_poll_iter()
7891 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
7895 if (time < sd_dp->cmpl_ts) { in sdebug_blk_mq_poll_iter()
7896 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
7901 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
7905 if (raw_smp_processor_id() != sd_dp->issuing_cpu) in sdebug_blk_mq_poll_iter()
7912 (*data->num_entries)++; in sdebug_blk_mq_poll_iter()
7924 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter, in sdebug_blk_mq_poll()
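The iterator above completes a command from the poll path only when two conditions hold under the per-command lock: the command was deferred as SDEB_DEFER_POLL (otherwise a hrtimer or work item owns its completion) and its completion timestamp has passed. Factored out, with an invented helper name:

/* Invented helper; the driver open-codes this test in the iterator above. */
static bool sdeb_poll_due_sketch(const struct sdebug_defer *sd_dp, ktime_t now)
{
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL)
		return false;	/* hrtimer/workqueue path owns this command */
	return ktime_compare(now, sd_dp->cmpl_ts) >= 0;
}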
7934 struct scsi_device *sdp = cmnd->device; in sdebug_timeout_cmd()
7935 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_timeout_cmd()
7937 unsigned char *cmd = cmnd->cmnd; in sdebug_timeout_cmd()
7944 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_timeout_cmd()
7945 if (err->type == ERR_TMOUT_CMD && in sdebug_timeout_cmd()
7946 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_timeout_cmd()
7947 ret = !!err->cnt; in sdebug_timeout_cmd()
7948 if (err->cnt < 0) in sdebug_timeout_cmd()
7949 err->cnt++; in sdebug_timeout_cmd()
7962 struct scsi_device *sdp = cmnd->device; in sdebug_fail_queue_cmd()
7963 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_queue_cmd()
7965 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_queue_cmd()
7972 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_queue_cmd()
7973 if (err->type == ERR_FAIL_QUEUE_CMD && in sdebug_fail_queue_cmd()
7974 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_queue_cmd()
7975 ret = err->cnt ? err->queuecmd_ret : 0; in sdebug_fail_queue_cmd()
7976 if (err->cnt < 0) in sdebug_fail_queue_cmd()
7977 err->cnt++; in sdebug_fail_queue_cmd()
7991 struct scsi_device *sdp = cmnd->device; in sdebug_fail_cmd()
7992 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_cmd()
7994 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_cmd()
8002 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_cmd()
8003 if (err->type == ERR_FAIL_CMD && in sdebug_fail_cmd()
8004 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_cmd()
8005 if (!err->cnt) { in sdebug_fail_cmd()
8010 ret = !!err->cnt; in sdebug_fail_cmd()
8020 if (err->cnt < 0) in sdebug_fail_cmd()
8021 err->cnt++; in sdebug_fail_cmd()
8022 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq); in sdebug_fail_cmd()
8023 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24; in sdebug_fail_cmd()
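The timeout, queue-failure and command-failure helpers above all walk devip->inject_err_list looking for a rule that matches the error type and the opcode, with 0xff acting as an opcode wildcard. The shape of that lookup, reduced to a yes/no check (invented helper name; this assumes the entries are the driver's sdebug_err_inject type, and the real helpers also honour a per-rule count and use the matched rule's fields):

/* Invented helper showing the RCU-protected rule lookup. */
static bool sdeb_has_inject_sketch(struct sdebug_dev_info *devip,
				   int type, unsigned char opcode)
{
	struct sdebug_err_inject *err;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type &&
		    (err->cmd == opcode || err->cmd == 0xff)) {
			found = true;	/* 0xff matches any opcode */
			break;
		}
	}
	rcu_read_unlock();
	return found;
}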
8034 struct scsi_device *sdp = scp->device; in scsi_debug_queuecommand()
8038 u8 *cmd = scp->cmnd; in scsi_debug_queuecommand()
8043 u64 lun_index = sdp->lun & 0x3FFF; in scsi_debug_queuecommand()
8064 len = scp->cmd_len; in scsi_debug_queuecommand()
8070 n += scnprintf(b + n, sb - n, "%02x ", in scsi_debug_queuecommand()
8078 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); in scsi_debug_queuecommand()
8084 devip = (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_queuecommand()
8116 na = oip->num_attached; in scsi_debug_queuecommand()
8117 r_pfp = oip->pfp; in scsi_debug_queuecommand()
8120 if (FF_SA & r_oip->flags) { in scsi_debug_queuecommand()
8121 if (F_SA_LOW & oip->flags) in scsi_debug_queuecommand()
8125 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
8126 if (opcode == oip->opcode && sa == oip->sa) in scsi_debug_queuecommand()
8130 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
8131 if (opcode == oip->opcode) in scsi_debug_queuecommand()
8136 if (F_SA_LOW & r_oip->flags) in scsi_debug_queuecommand()
8138 else if (F_SA_HIGH & r_oip->flags) in scsi_debug_queuecommand()
8145 flags = oip->flags; in scsi_debug_queuecommand()
8161 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) { in scsi_debug_queuecommand()
8162 rem = ~oip->len_mask[k] & cmd[k]; in scsi_debug_queuecommand()
8164 for (j = 7; j >= 0; --j, rem <<= 1) { in scsi_debug_queuecommand()
8174 find_first_bit(devip->uas_bm, in scsi_debug_queuecommand()
8180 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) && in scsi_debug_queuecommand()
8181 atomic_read(&devip->stopped))) { in scsi_debug_queuecommand()
8192 if (likely(oip->pfp)) in scsi_debug_queuecommand()
8193 pfp = oip->pfp; /* calls a resp_* function */ in scsi_debug_queuecommand()
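Before a handler is picked, the queuecommand fragments validate the CDB against the opcode table entry's len_mask[]: byte 0 holds the expected CDB length and the remaining bytes say which bits may legitimately be set in each CDB byte. The check amounts to the helper below (invented name; the driver open-codes it and turns a hit into an ILLEGAL REQUEST sense naming the offending byte and bit):

/* Invented helper equivalent to the len_mask loop above. */
static int sdeb_cdb_mask_check_sketch(const unsigned char *cdb,
				      const unsigned char *len_mask)
{
	int k;

	for (k = 1; k < len_mask[0] && k < 16; ++k) {
		unsigned char rem = ~len_mask[k] & cdb[k];

		if (rem)
			return k;	/* first CDB byte with a reserved bit set */
	}
	return 0;			/* CDB passes the mask check */
}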
8226 spin_lock_init(&sdsc->lock); in sdebug_init_cmd_priv()
8254 .max_sectors = -1U,
8255 .max_segment_size = -1U,
8276 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1; in sdebug_driver_probe()
8281 error = -ENODEV; in sdebug_driver_probe()
8293 hpnt->nr_hw_queues = submit_queues; in sdebug_driver_probe()
8295 hpnt->host_tagset = 1; in sdebug_driver_probe()
8298 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) { in sdebug_driver_probe()
8300 my_name, poll_queues, hpnt->nr_hw_queues); in sdebug_driver_probe()
8306 * left over for non-polled I/O. in sdebug_driver_probe()
8314 my_name, submit_queues - 1); in sdebug_driver_probe()
8318 hpnt->nr_maps = 3; in sdebug_driver_probe()
8320 sdbg_host->shost = hpnt; in sdebug_driver_probe()
8321 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) in sdebug_driver_probe()
8322 hpnt->max_id = sdebug_num_tgts + 1; in sdebug_driver_probe()
8324 hpnt->max_id = sdebug_num_tgts; in sdebug_driver_probe()
8326 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; in sdebug_driver_probe()
8377 error = scsi_add_host(hpnt, &sdbg_host->dev); in sdebug_driver_probe()
8380 error = -ENODEV; in sdebug_driver_probe()
8396 scsi_remove_host(sdbg_host->shost); in sdebug_driver_remove()
8398 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, in sdebug_driver_remove()
8400 list_del(&sdbg_devinfo->dev_list); in sdebug_driver_remove()
8401 kfree(sdbg_devinfo->zstate); in sdebug_driver_remove()
8405 scsi_host_put(sdbg_host->shost); in sdebug_driver_remove()