1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-scsi.c - helper library for ATA
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
9 * as Documentation/driver-api/libata.rst
12 * - http://www.t10.org/
13 * - http://www.t13.org/
38 #include "libata-transport.h"
66 RW_RECOVERY_MPAGE_LEN - 2,
76 CACHE_MPAGE_LEN - 2,
85 CONTROL_MPAGE_LEN - 2,
87 0, /* [QAM+QERR may be 1, see 05-359r1] */
89 0, 30 /* extended self test time, see 05-359r1 */
103 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_show()
105 spin_lock_irq(ap->lock); in ata_scsi_park_show()
108 rc = -ENODEV; in ata_scsi_park_show()
111 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_show()
112 rc = -EOPNOTSUPP; in ata_scsi_park_show()
116 link = dev->link; in ata_scsi_park_show()
118 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && in ata_scsi_park_show()
119 link->eh_context.unloaded_mask & (1 << dev->devno) && in ata_scsi_park_show()
120 time_after(dev->unpark_deadline, now)) in ata_scsi_park_show()
121 msecs = jiffies_to_msecs(dev->unpark_deadline - now); in ata_scsi_park_show()
126 spin_unlock_irq(ap->lock); in ata_scsi_park_show()
145 if (input < -2) in ata_scsi_park_store()
146 return -EINVAL; in ata_scsi_park_store()
148 rc = -EOVERFLOW; in ata_scsi_park_store()
152 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_store()
154 spin_lock_irqsave(ap->lock, flags); in ata_scsi_park_store()
157 rc = -ENODEV; in ata_scsi_park_store()
160 if (dev->class != ATA_DEV_ATA && in ata_scsi_park_store()
161 dev->class != ATA_DEV_ZAC) { in ata_scsi_park_store()
162 rc = -EOPNOTSUPP; in ata_scsi_park_store()
167 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_store()
168 rc = -EOPNOTSUPP; in ata_scsi_park_store()
172 dev->unpark_deadline = ata_deadline(jiffies, input); in ata_scsi_park_store()
173 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; in ata_scsi_park_store()
175 complete(&ap->park_req_pending); in ata_scsi_park_store()
178 case -1: in ata_scsi_park_store()
179 dev->flags &= ~ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
181 case -2: in ata_scsi_park_store()
182 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
187 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_park_store()
214 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_scsi_set_sense()
221 const struct ata_taskfile *tf) in ata_scsi_set_sense_information() argument
225 information = ata_tf_read_block(tf, dev); in ata_scsi_set_sense_information()
229 scsi_set_sense_information(cmd->sense_buffer, in ata_scsi_set_sense_information()
238 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_field()
247 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_parameter()
267 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
274 * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS)
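A minimal, self-contained sketch of the translated-geometry arithmetic described above (userspace-style C; chs_geom and fake_chs_geometry are illustrative names, not libata API), assuming the conventional 255-head/63-sector mapping:

#include <stdint.h>

struct chs_geom {
	uint32_t cylinders;
	uint16_t heads;
	uint16_t sectors;
};

/* Derive a fake CHS geometry from a capacity given in sectors. */
static struct chs_geom fake_chs_geometry(uint64_t capacity, uint16_t heads,
					 uint16_t sectors)
{
	struct chs_geom g = { .heads = heads, .sectors = sectors };

	g.cylinders = (uint32_t)(capacity / ((uint64_t)heads * sectors));
	return g;
}

/* e.g. fake_chs_geometry(n_sectors, 255, 63) for the usual translated geometry */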
297 * ata_scsi_unlock_native_capacity - unlock native capacity
300 * This function is called if a partition on @sdev extends beyond
308 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_unlock_native_capacity()
312 spin_lock_irqsave(ap->lock, flags); in ata_scsi_unlock_native_capacity()
315 if (dev && dev->n_sectors < dev->n_native_sectors) { in ata_scsi_unlock_native_capacity()
316 dev->flags |= ATA_DFLAG_UNLOCK_HPA; in ata_scsi_unlock_native_capacity()
317 dev->link->eh_info.action |= ATA_EH_RESET; in ata_scsi_unlock_native_capacity()
321 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_unlock_native_capacity()
327 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
346 return -ENOMSG; in ata_get_identity()
348 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) in ata_get_identity()
349 return -EFAULT; in ata_get_identity()
351 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); in ata_get_identity()
353 return -EFAULT; in ata_get_identity()
355 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); in ata_get_identity()
357 return -EFAULT; in ata_get_identity()
359 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); in ata_get_identity()
361 return -EFAULT; in ata_get_identity()
367 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
393 return -EINVAL; in ata_cmd_ioctl()
396 return -EFAULT; in ata_cmd_ioctl()
405 rc = -ENOMEM; in ata_cmd_ioctl()
409 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ in ata_cmd_ioctl()
413 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_cmd_ioctl()
420 if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ in ata_cmd_ioctl()
441 /* If we set cc then ATA pass-through will cause a in ata_cmd_ioctl()
449 /* Send userspace a few ATA registers (same as drivers/ide) */ in ata_cmd_ioctl()
456 rc = -EFAULT; in ata_cmd_ioctl()
462 rc = -EIO; in ata_cmd_ioctl()
468 rc = -EFAULT; in ata_cmd_ioctl()
475 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
500 return -EINVAL; in ata_task_ioctl()
503 return -EFAULT; in ata_task_ioctl()
508 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_task_ioctl()
529 /* If we set cc then ATA pass-through will cause a in ata_task_ioctl()
548 rc = -EFAULT; in ata_task_ioctl()
553 rc = -EIO; in ata_task_ioctl()
563 if (ap->flags & ATA_FLAG_PIO_DMA) in ata_ioc32()
565 if (ap->pflags & ATA_PFLAG_PIO32) in ata_ioc32()
572 * here must have a compatible argument, or check in_compat_syscall()
578 int rc = -EINVAL; in ata_sas_scsi_ioctl()
583 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
585 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
595 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
596 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { in ata_sas_scsi_ioctl()
598 ap->pflags |= ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
600 ap->pflags &= ~ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
603 rc = -EINVAL; in ata_sas_scsi_ioctl()
605 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
613 return -EACCES; in ata_sas_scsi_ioctl()
618 return -EACCES; in ata_sas_scsi_ioctl()
622 rc = -ENOTTY; in ata_sas_scsi_ioctl()
633 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), in ata_scsi_ioctl()
639 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
643 * Obtain a reference to an unused ata_queued_cmd structure,
644 * which is the basic libata structure representing a single
647 * If a command was available, fill in the SCSI-specific
660 struct ata_port *ap = dev->link->ap; in ata_scsi_qc_new()
667 if (ap->flags & ATA_FLAG_SAS_HOST) { in ata_scsi_qc_new()
670 * unique per-device budget token as a tag. in ata_scsi_qc_new()
672 if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE)) in ata_scsi_qc_new()
674 tag = cmd->budget_token; in ata_scsi_qc_new()
676 tag = scsi_cmd_to_rq(cmd)->tag; in ata_scsi_qc_new()
680 qc->tag = qc->hw_tag = tag; in ata_scsi_qc_new()
681 qc->ap = ap; in ata_scsi_qc_new()
682 qc->dev = dev; in ata_scsi_qc_new()
686 qc->scsicmd = cmd; in ata_scsi_qc_new()
687 qc->scsidone = scsi_done; in ata_scsi_qc_new()
689 qc->sg = scsi_sglist(cmd); in ata_scsi_qc_new()
690 qc->n_elem = scsi_sg_count(cmd); in ata_scsi_qc_new()
692 if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET) in ata_scsi_qc_new()
693 qc->flags |= ATA_QCFLAG_QUIET; in ata_scsi_qc_new()
706 struct scsi_cmnd *scmd = qc->scsicmd; in ata_qc_set_pc_nbytes()
708 qc->extrabytes = scmd->extra_len; in ata_qc_set_pc_nbytes()
709 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; in ata_qc_set_pc_nbytes()
713 * ata_to_sense_error - convert ATA error to SCSI error
721 * Converts an ATA error into a SCSI error. Fill out pointers to
756 /* TRK0 - Track 0 not found */ in ata_to_sense_error()
763 /* SRV/IDNF - ID not found */ in ata_to_sense_error()
766 /* MC - Media Changed */ in ata_to_sense_error()
769 /* ECC - Uncorrectable ECC error */ in ata_to_sense_error()
772 /* BBD - block marked bad */ in ata_to_sense_error()
828 * We need a sensible error return here, which is tricky, and one in ata_to_sense_error()
829 * that won't cause people to do things like return a disk wrongly. in ata_to_sense_error()
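The error-register-to-sense mapping above is typically table driven; here is a hedged, standalone sketch of that idea (the exact rows and all names are illustrative, not the libata tables):

#include <stdint.h>

struct ata_err_map {
	uint8_t err_bit;	/* bit in the ATA error register */
	uint8_t sense_key;
	uint8_t asc;
	uint8_t ascq;
};

static const struct ata_err_map err_map[] = {
	{ 0x04 /* ABRT */, 0x0b /* ABORTED COMMAND */, 0x00, 0x00 },
	{ 0x10 /* IDNF */, 0x05 /* ILLEGAL REQUEST */, 0x21, 0x00 },
	{ 0x20 /* MC   */, 0x06 /* UNIT ATTENTION  */, 0x28, 0x00 },
	{ 0x40 /* UNC  */, 0x03 /* MEDIUM ERROR    */, 0x11, 0x04 },
};

static void map_ata_error(uint8_t err, uint8_t *key, uint8_t *asc, uint8_t *ascq)
{
	unsigned int i;

	/* fallback: ABORTED COMMAND with no additional sense information */
	*key = 0x0b;
	*asc = 0x00;
	*ascq = 0x00;
	for (i = 0; i < sizeof(err_map) / sizeof(err_map[0]); i++) {
		if (err & err_map[i].err_bit) {
			*key = err_map[i].sense_key;
			*asc = err_map[i].asc;
			*ascq = err_map[i].ascq;
			return;
		}
	}
}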
837 * ata_gen_passthru_sense - Generate check condition sense block.
842 * of whether the command errored or not, return a sense
847 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
855 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_passthru_sense()
856 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_passthru_sense() local
857 unsigned char *sb = cmd->sense_buffer; in ata_gen_passthru_sense()
867 if (qc->err_mask || in ata_gen_passthru_sense()
868 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_passthru_sense()
869 ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, in ata_gen_passthru_sense()
871 ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); in ata_gen_passthru_sense()
874 * ATA PASS-THROUGH INFORMATION AVAILABLE in ata_gen_passthru_sense()
880 if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) { in ata_gen_passthru_sense()
898 desc[3] = tf->error; in ata_gen_passthru_sense()
899 desc[5] = tf->nsect; in ata_gen_passthru_sense()
900 desc[7] = tf->lbal; in ata_gen_passthru_sense()
901 desc[9] = tf->lbam; in ata_gen_passthru_sense()
902 desc[11] = tf->lbah; in ata_gen_passthru_sense()
903 desc[12] = tf->device; in ata_gen_passthru_sense()
904 desc[13] = tf->status; in ata_gen_passthru_sense()
910 if (tf->flags & ATA_TFLAG_LBA48) { in ata_gen_passthru_sense()
912 desc[4] = tf->hob_nsect; in ata_gen_passthru_sense()
913 desc[6] = tf->hob_lbal; in ata_gen_passthru_sense()
914 desc[8] = tf->hob_lbam; in ata_gen_passthru_sense()
915 desc[10] = tf->hob_lbah; in ata_gen_passthru_sense()
919 desc[0] = tf->error; in ata_gen_passthru_sense()
920 desc[1] = tf->status; in ata_gen_passthru_sense()
921 desc[2] = tf->device; in ata_gen_passthru_sense()
922 desc[3] = tf->nsect; in ata_gen_passthru_sense()
924 if (tf->flags & ATA_TFLAG_LBA48) { in ata_gen_passthru_sense()
926 if (tf->hob_nsect) in ata_gen_passthru_sense()
928 if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) in ata_gen_passthru_sense()
931 desc[9] = tf->lbal; in ata_gen_passthru_sense()
932 desc[10] = tf->lbam; in ata_gen_passthru_sense()
933 desc[11] = tf->lbah; in ata_gen_passthru_sense()
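The bytes filled in above follow the SAT ATA Status Return sense descriptor (descriptor code 09h, 14 bytes). A hedged, standalone sketch of the same layout (struct ata_regs and the helper name are illustrative, not libata types):

#include <stdint.h>
#include <string.h>

struct ata_regs {
	uint8_t error, status, device, nsect, lbal, lbam, lbah;
	uint8_t hob_nsect, hob_lbal, hob_lbam, hob_lbah;
	int lba48;
};

static void fill_ata_status_return_desc(uint8_t desc[14], const struct ata_regs *r)
{
	memset(desc, 0, 14);
	desc[0] = 0x09;			/* descriptor code: ATA Status Return */
	desc[1] = 12;			/* additional descriptor length */
	desc[2] = r->lba48 ? 0x01 : 0;	/* EXTEND bit */
	desc[3] = r->error;
	desc[5] = r->nsect;
	desc[7] = r->lbal;
	desc[9] = r->lbam;
	desc[11] = r->lbah;
	desc[12] = r->device;
	desc[13] = r->status;
	if (r->lba48) {
		desc[4] = r->hob_nsect;
		desc[6] = r->hob_lbal;
		desc[8] = r->hob_lbam;
		desc[10] = r->hob_lbah;
	}
}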
938 * ata_gen_ata_sense - generate a SCSI fixed sense block
941 * Generate sense block for a failed ATA command @qc. Descriptor
949 struct ata_device *dev = qc->dev; in ata_gen_ata_sense()
950 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_ata_sense()
951 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_ata_sense() local
952 unsigned char *sb = cmd->sense_buffer; in ata_gen_ata_sense()
967 if (qc->err_mask || in ata_gen_ata_sense()
968 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_ata_sense()
969 ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, in ata_gen_ata_sense()
975 tf->status, qc->err_mask); in ata_gen_ata_sense()
980 block = ata_tf_read_block(&qc->result_tf, dev); in ata_gen_ata_sense()
989 sdev->use_10_for_rw = 1; in ata_scsi_sdev_config()
990 sdev->use_10_for_ms = 1; in ata_scsi_sdev_config()
991 sdev->no_write_same = 1; in ata_scsi_sdev_config()
993 /* Schedule policy is determined by ->qc_defer() callback and in ata_scsi_sdev_config()
998 sdev->max_device_blocked = 1; in ata_scsi_sdev_config()
1002 * ata_scsi_dma_need_drain - Check whether data transfer may overflow
1020 return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC; in ata_scsi_dma_need_drain()
1026 struct request_queue *q = sdev->request_queue; in ata_scsi_dev_config()
1029 if (!ata_id_has_unload(dev->id)) in ata_scsi_dev_config()
1030 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_dev_config()
1033 dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors); in ata_scsi_dev_config()
1034 blk_queue_max_hw_sectors(q, dev->max_sectors); in ata_scsi_dev_config()
1036 if (dev->class == ATA_DEV_ATAPI) { in ata_scsi_dev_config()
1037 sdev->sector_size = ATA_SECT_SIZE; in ata_scsi_dev_config()
1040 blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); in ata_scsi_dev_config()
1043 blk_queue_max_segments(q, queue_max_segments(q) - 1); in ata_scsi_dev_config()
1045 sdev->dma_drain_len = ATAPI_MAX_DRAIN; in ata_scsi_dev_config()
1046 sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO); in ata_scsi_dev_config()
1047 if (!sdev->dma_drain_buf) { in ata_scsi_dev_config()
1049 return -ENOMEM; in ata_scsi_dev_config()
1052 sdev->sector_size = ata_id_logical_sector_size(dev->id); in ata_scsi_dev_config()
1063 sdev->manage_runtime_start_stop = 1; in ata_scsi_dev_config()
1064 sdev->manage_shutdown = 1; in ata_scsi_dev_config()
1065 sdev->force_runtime_start_on_system_start = 1; in ata_scsi_dev_config()
1075 if (sdev->sector_size > PAGE_SIZE) in ata_scsi_dev_config()
1078 sdev->sector_size); in ata_scsi_dev_config()
1080 blk_queue_update_dma_alignment(q, sdev->sector_size - 1); in ata_scsi_dev_config()
1082 if (dev->flags & ATA_DFLAG_AN) in ata_scsi_dev_config()
1083 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); in ata_scsi_dev_config()
1086 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); in ata_scsi_dev_config()
1090 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsi_dev_config()
1091 sdev->security_supported = 1; in ata_scsi_dev_config()
1093 dev->sdev = sdev; in ata_scsi_dev_config()
1098 * ata_scsi_slave_alloc - Early setup of SCSI device
1102 * associated with an ATA device is scanned on a port.
1110 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_alloc()
1116 * Create a link from the ata_port device to the scsi device to ensure in ata_scsi_slave_alloc()
1120 link = device_link_add(&sdev->sdev_gendev, &ap->tdev, in ata_scsi_slave_alloc()
1125 dev_name(&sdev->sdev_gendev)); in ata_scsi_slave_alloc()
1126 return -ENODEV; in ata_scsi_slave_alloc()
1134 * ata_scsi_slave_config - Set SCSI device attributes
1139 * SCSI mid-layer behaviors.
1147 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_config()
1158 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
1163 * dev->sdev, this function doesn't have to do anything.
1164 * Otherwise, SCSI layer initiated warm-unplug is in progress.
1165 * Clear dev->sdev, schedule the device for ATA detach and invoke
1173 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_destroy()
1177 device_link_remove(&sdev->sdev_gendev, &ap->tdev); in ata_scsi_slave_destroy()
1179 spin_lock_irqsave(ap->lock, flags); in ata_scsi_slave_destroy()
1181 if (dev && dev->sdev) { in ata_scsi_slave_destroy()
1183 dev->sdev = NULL; in ata_scsi_slave_destroy()
1184 dev->flags |= ATA_DFLAG_DETACH; in ata_scsi_slave_destroy()
1187 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_slave_destroy()
1189 kfree(sdev->dma_drain_buf); in ata_scsi_slave_destroy()
1194 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
1206 * Zero on success, non-zero on error.
1210 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_start_stop_xlat()
1211 const u8 *cdb = scmd->cmnd; in ata_scsi_start_stop_xlat()
1215 if (scmd->cmd_len < 5) { in ata_scsi_start_stop_xlat()
1234 /* Ignore IMMED bit (cdb[1] & 0x1), violates sat-r05 */ in ata_scsi_start_stop_xlat()
1235 if (!ata_dev_power_init_tf(qc->dev, &qc->tf, cdb[4] & 0x1)) { in ata_scsi_start_stop_xlat()
1236 ata_scsi_set_sense(qc->dev, scmd, ABORTED_COMMAND, 0, 0); in ata_scsi_start_stop_xlat()
1250 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_start_stop_xlat()
1255 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
1265 * Zero on success, non-zero on error.
1269 struct ata_taskfile *tf = &qc->tf; in ata_scsi_flush_xlat() local
1271 tf->flags |= ATA_TFLAG_DEVICE; in ata_scsi_flush_xlat()
1272 tf->protocol = ATA_PROT_NODATA; in ata_scsi_flush_xlat()
1274 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) in ata_scsi_flush_xlat()
1275 tf->command = ATA_CMD_FLUSH_EXT; in ata_scsi_flush_xlat()
1277 tf->command = ATA_CMD_FLUSH; in ata_scsi_flush_xlat()
1280 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_flush_xlat()
1286 * scsi_6_lba_len - Get LBA and transfer length
1289 * Calculate LBA and transfer length for 6-byte commands.
1311 * scsi_10_lba_len - Get LBA and transfer length
1314 * Calculate LBA and transfer length for 10-byte commands.
1327 * scsi_16_lba_len - Get LBA and transfer length
1330 * Calculate LBA and transfer length for 16-byte commands.
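A hedged, standalone sketch of the CDB fields these three helpers decode (illustrative names; the LBA and length fields are big-endian, as defined by SBC):

#include <stdint.h>

static void cdb6_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)(cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
	*len = cdb[4];		/* note: 0 means 256 blocks for 6-byte R/W CDBs */
}

static void cdb10_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	*len = (cdb[7] << 8) | cdb[8];
}

static void cdb16_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	int i;

	*lba = 0;
	for (i = 2; i < 10; i++)	/* bytes 2..9 hold the 64-bit LBA */
		*lba = (*lba << 8) | cdb[i];
	*len = ((uint32_t)cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13];
}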
1343 * scsi_dld - Get duration limit descriptor index
1346 * Returns the dld bits indicating the index of a command duration limit
1355 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1364 * Zero on success, non-zero on error.
1368 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_verify_xlat()
1369 struct ata_taskfile *tf = &qc->tf; in ata_scsi_verify_xlat() local
1370 struct ata_device *dev = qc->dev; in ata_scsi_verify_xlat()
1371 u64 dev_sectors = qc->dev->n_sectors; in ata_scsi_verify_xlat()
1372 const u8 *cdb = scmd->cmnd; in ata_scsi_verify_xlat()
1377 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_verify_xlat()
1378 tf->protocol = ATA_PROT_NODATA; in ata_scsi_verify_xlat()
1382 if (scmd->cmd_len < 10) { in ata_scsi_verify_xlat()
1389 if (scmd->cmd_len < 16) { in ata_scsi_verify_xlat()
1407 if (dev->flags & ATA_DFLAG_LBA) { in ata_scsi_verify_xlat()
1408 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_verify_xlat()
1412 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1413 tf->device |= (block >> 24) & 0xf; in ata_scsi_verify_xlat()
1415 if (!(dev->flags & ATA_DFLAG_LBA48)) in ata_scsi_verify_xlat()
1419 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_verify_xlat()
1420 tf->command = ATA_CMD_VERIFY_EXT; in ata_scsi_verify_xlat()
1422 tf->hob_nsect = (n_block >> 8) & 0xff; in ata_scsi_verify_xlat()
1424 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_verify_xlat()
1425 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_verify_xlat()
1426 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_verify_xlat()
1431 tf->nsect = n_block & 0xff; in ata_scsi_verify_xlat()
1433 tf->lbah = (block >> 16) & 0xff; in ata_scsi_verify_xlat()
1434 tf->lbam = (block >> 8) & 0xff; in ata_scsi_verify_xlat()
1435 tf->lbal = block & 0xff; in ata_scsi_verify_xlat()
1437 tf->device |= ATA_LBA; in ata_scsi_verify_xlat()
1446 track = (u32)block / dev->sectors; in ata_scsi_verify_xlat()
1447 cyl = track / dev->heads; in ata_scsi_verify_xlat()
1448 head = track % dev->heads; in ata_scsi_verify_xlat()
1449 sect = (u32)block % dev->sectors + 1; in ata_scsi_verify_xlat()
1452 Cylinder: 0-65535 in ata_scsi_verify_xlat()
1453 Head: 0-15 in ata_scsi_verify_xlat()
1454 Sector: 1-255*/ in ata_scsi_verify_xlat()
1458 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1459 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ in ata_scsi_verify_xlat()
1460 tf->lbal = sect; in ata_scsi_verify_xlat()
1461 tf->lbam = cyl; in ata_scsi_verify_xlat()
1462 tf->lbah = cyl >> 8; in ata_scsi_verify_xlat()
1463 tf->device |= head; in ata_scsi_verify_xlat()
1469 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_verify_xlat()
1473 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_verify_xlat()
1478 scmd->result = SAM_STAT_GOOD; in ata_scsi_verify_xlat()
1490 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; in ata_check_nblocks()
1498 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1513 * Zero on success, non-zero on error.
1517 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_rw_xlat()
1518 const u8 *cdb = scmd->cmnd; in ata_scsi_rw_xlat()
1540 if (unlikely(scmd->cmd_len < 10)) { in ata_scsi_rw_xlat()
1552 if (unlikely(scmd->cmd_len < 6)) { in ata_scsi_rw_xlat()
1558 /* for 6-byte r/w commands, transfer length 0 in ata_scsi_rw_xlat()
1568 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_rw_xlat()
1586 /* For 10-byte and 16-byte SCSI R/W commands, transfer in ata_scsi_rw_xlat()
1595 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_rw_xlat()
1596 qc->nbytes = n_block * scmd->device->sector_size; in ata_scsi_rw_xlat()
1602 if (rc == -ERANGE) in ata_scsi_rw_xlat()
1604 /* treat all other errors as -EINVAL, fall through */ in ata_scsi_rw_xlat()
1606 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_rw_xlat()
1610 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_rw_xlat()
1615 scmd->result = SAM_STAT_GOOD; in ata_scsi_rw_xlat()
1621 struct scsi_cmnd *cmd = qc->scsicmd; in ata_qc_done()
1622 void (*done)(struct scsi_cmnd *) = qc->scsidone; in ata_qc_done()
1630 struct scsi_cmnd *cmd = qc->scsicmd; in ata_scsi_qc_complete()
1631 u8 *cdb = cmd->cmnd; in ata_scsi_qc_complete()
1632 int need_sense = (qc->err_mask != 0) && in ata_scsi_qc_complete()
1633 !(qc->flags & ATA_QCFLAG_SENSE_VALID); in ata_scsi_qc_complete()
1635 /* For ATA pass thru (SAT) commands, generate a sense block if in ata_scsi_qc_complete()
1637 * generate because the user forced us to [CK_COND =1], a check in ata_scsi_qc_complete()
1642 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE in ata_scsi_qc_complete()
1651 cmd->result &= 0x0000ffff; in ata_scsi_qc_complete()
1657 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1662 * Our ->queuecommand() function has decided that the SCSI
1671 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1685 struct ata_port *ap = dev->link->ap; in ata_scsi_translate()
1693 /* data is present; dma-map it */ in ata_scsi_translate()
1694 if (cmd->sc_data_direction == DMA_FROM_DEVICE || in ata_scsi_translate()
1695 cmd->sc_data_direction == DMA_TO_DEVICE) { in ata_scsi_translate()
1703 qc->dma_dir = cmd->sc_data_direction; in ata_scsi_translate()
1706 qc->complete_fn = ata_scsi_qc_complete; in ata_scsi_translate()
1711 if (ap->ops->qc_defer) { in ata_scsi_translate()
1712 if ((rc = ap->ops->qc_defer(qc))) in ata_scsi_translate()
1728 cmd->result = (DID_ERROR << 16); in ata_scsi_translate()
1748 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1752 * Takes care of the hard work of simulating a SCSI command...
1756 * completed successfully (0), or not (in which case cmd->result
1766 struct scsi_cmnd *cmd = args->cmd; in ata_scsi_rbuf_fill()
1780 cmd->result = SAM_STAT_GOOD; in ata_scsi_rbuf_fill()
1784 * ata_scsiop_inq_std - Simulate INQUIRY command
1789 * with non-VPD INQUIRY command output.
1798 0x60, /* SAM-3 (no version claimed) */ in ata_scsiop_inq_std()
1801 0x20, /* SBC-2 (no version claimed) */ in ata_scsiop_inq_std()
1804 0x00 /* SPC-3 (no version claimed) */ in ata_scsiop_inq_std()
1808 0xA0, /* SAM-5 (no version claimed) */ in ata_scsiop_inq_std()
1811 0x00, /* SBC-4 (no version claimed) */ in ata_scsiop_inq_std()
1814 0xC0, /* SPC-5 (no version claimed) */ in ata_scsiop_inq_std()
1823 0x5, /* claim SPC-3 version compatibility */ in ata_scsiop_inq_std()
1825 95 - 4, in ata_scsiop_inq_std()
1832 * AHCI port says it's external (Hotplug-capable, eSATA). in ata_scsiop_inq_std()
1834 if (ata_id_removable(args->id) || in ata_scsiop_inq_std()
1835 (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) in ata_scsiop_inq_std()
1838 if (args->dev->class == ATA_DEV_ZAC) { in ata_scsiop_inq_std()
1840 hdr[2] = 0x7; /* claim SPC-5 version compatibility */ in ata_scsiop_inq_std()
1843 if (args->dev->flags & ATA_DFLAG_CDL) in ata_scsiop_inq_std()
1844 hdr[2] = 0xd; /* claim SPC-6 version compatibility */ in ata_scsiop_inq_std()
1848 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); in ata_scsiop_inq_std()
1851 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); in ata_scsiop_inq_std()
1853 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); in ata_scsiop_inq_std()
1856 memcpy(&rbuf[32], "n/a ", 4); in ata_scsiop_inq_std()
1858 if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC) in ata_scsiop_inq_std()
1867 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1893 !(args->dev->flags & ATA_DFLAG_ZAC)) in ata_scsiop_inq_00()
1903 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1922 ata_id_string(args->id, (unsigned char *) &rbuf[4], in ata_scsiop_inq_80()
1928 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1933 * - vendor specific ASCII containing the ATA serial number
1934 * - SAT defined "t10 vendor id based" containing ASCII vendor
1952 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
1964 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD, in ata_scsiop_inq_83()
1967 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, in ata_scsiop_inq_83()
1971 if (ata_id_has_wwn(args->id)) { in ata_scsiop_inq_83()
1978 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
1982 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ in ata_scsiop_inq_83()
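A hedged sketch of the designation-descriptor framing used for the ASCII designators above (illustrative helper; byte 0 selects the ASCII code set, byte 1 the association and designator type):

#include <stdint.h>
#include <string.h>

static int vpd83_ascii_designator(uint8_t *buf, uint8_t assoc_and_type,
				  const char *id, uint8_t id_len)
{
	buf[0] = 0x02;			/* code set: ASCII */
	buf[1] = assoc_and_type;	/* e.g. 0x01 for a T10 vendor ID based designator */
	buf[2] = 0;
	buf[3] = id_len;		/* designator length */
	memcpy(&buf[4], id, id_len);
	return 4 + id_len;		/* bytes consumed in the VPD page */
}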
1987 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
1991 * Yields SAT-specified ATA VPD page.
2017 memcpy(&rbuf[60], &args->id[0], 512); in ata_scsiop_inq_89()
2023 struct ata_device *dev = args->dev; in ata_scsiop_inq_b0()
2032 * This is always one physical block, but for disks with a smaller in ata_scsiop_inq_b0()
2036 min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); in ata_scsiop_inq_b0()
2042 * The ATA spec doesn't even know about a granularity or alignment in ata_scsiop_inq_b0()
2044 * VPD page entries, but we have to specify a granularity to signal in ata_scsiop_inq_b0()
2045 * that we support some form of unmap - in this case via WRITE SAME in ata_scsiop_inq_b0()
2048 if (ata_id_has_trim(args->id)) { in ata_scsiop_inq_b0()
2051 if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) in ata_scsiop_inq_b0()
2052 max_blocks = 128 << (20 - SECTOR_SHIFT); in ata_scsiop_inq_b0()
2063 int form_factor = ata_id_form_factor(args->id); in ata_scsiop_inq_b1()
2064 int media_rotation_rate = ata_id_rotation_rate(args->id); in ata_scsiop_inq_b1()
2065 u8 zoned = ata_id_zoned_cap(args->id); in ata_scsiop_inq_b1()
2080 /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ in ata_scsiop_inq_b2()
2091 * zbc-r05 SCSI Zoned Block device characteristics VPD page in ata_scsiop_inq_b6()
2097 * URSWRZ bit is only meaningful for host-managed ZAC drives in ata_scsiop_inq_b6()
2099 if (args->dev->zac_zoned_cap & 1) in ata_scsiop_inq_b6()
2101 put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]); in ata_scsiop_inq_b6()
2102 put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]); in ata_scsiop_inq_b6()
2103 put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]); in ata_scsiop_inq_b6()
2110 struct ata_cpr_log *cpr_log = args->dev->cpr_log; in ata_scsiop_inq_b9()
2114 /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ in ata_scsiop_inq_b9()
2116 put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); in ata_scsiop_inq_b9()
2118 for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { in ata_scsiop_inq_b9()
2119 desc[0] = cpr_log->cpr[i].num; in ata_scsiop_inq_b9()
2120 desc[1] = cpr_log->cpr[i].num_storage_elements; in ata_scsiop_inq_b9()
2121 put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]); in ata_scsiop_inq_b9()
2122 put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]); in ata_scsiop_inq_b9()
2129 * modecpy - Prepare response for MODE SENSE
2135 * Generate a generic MODE SENSE page for either current or changeable
2145 memset(dest + 2, 0, n - 2); in modecpy()
2152 * ata_msense_caching - Simulate MODE SENSE caching info page
2157 * Generate a caching info page, which conditionally indicates
2177 * Simulate MODE SENSE control mode page, sub-page 0.
2188 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_msense_control_spg0()
2198 * Translate an ATA duration limit in microseconds to a SCSI duration limit
2199 * using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes
2210 * Simulate MODE SENSE control mode page, sub-pages 07h and 08h
2216 u8 *b, *cdl = dev->cdl, *desc; in ata_msense_control_spgt2()
2222 * are a header. The PAGE LENGTH field is the size of the page in ata_msense_control_spgt2()
2227 put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_spgt2()
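The microseconds-to-10 ms translation mentioned above, as a minimal standalone sketch (the rounding and clamping choices here are illustrative, not necessarily what libata does):

#include <stdint.h>

/* Convert an ATA limit in microseconds to T2CDLUNITS of 10 ms, capped at 16 bits. */
static uint16_t cdl_usec_to_10ms(uint32_t limit_us)
{
	uint32_t limit_10ms = (limit_us + 9999) / 10000;	/* round up */

	return limit_10ms > 0xffff ? 0xffff : (uint16_t)limit_10ms;
}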
2264 * Simulate MODE SENSE control mode page, sub-page f2h
2275 * The first four bytes of the ATA Feature Control mode page are a header. in ata_msense_control_ata_feature()
2278 put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_ata_feature()
2280 if (dev->flags & ATA_DFLAG_CDL) in ata_msense_control_ata_feature()
2289 * ata_msense_control - Simulate MODE SENSE control mode page
2292 * @spg: sub-page code
2295 * Generate a generic MODE SENSE control mode page.
2325 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
2329 * Generate a generic MODE SENSE r/w error recovery page.
2342 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
2355 struct ata_device *dev = args->dev; in ata_scsiop_mode_sense()
2356 u8 *scsicmd = args->cmd->cmnd, *p = rbuf; in ata_scsiop_mode_sense()
2396 * Supported subpages: all subpages and sub-pages 07h, 08h and f2h of in ata_scsiop_mode_sense()
2406 if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE) in ata_scsiop_mode_sense()
2421 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2425 p += ata_msense_control(args->dev, p, spg, page_control == 1); in ata_scsiop_mode_sense()
2430 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2431 p += ata_msense_control(args->dev, p, spg, page_control == 1); in ata_scsiop_mode_sense()
2439 if (dev->flags & ATA_DFLAG_FUA) in ata_scsiop_mode_sense()
2443 rbuf[0] = p - rbuf - 1; in ata_scsiop_mode_sense()
2450 put_unaligned_be16(p - rbuf - 2, &rbuf[0]); in ata_scsiop_mode_sense()
2460 ata_scsi_set_invalid_field(dev, args->cmd, fp, bp); in ata_scsiop_mode_sense()
2464 ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); in ata_scsiop_mode_sense()
2470 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2481 struct ata_device *dev = args->dev; in ata_scsiop_read_cap()
2482 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ in ata_scsiop_read_cap()
2487 sector_size = ata_id_logical_sector_size(dev->id); in ata_scsiop_read_cap()
2488 log2_per_phys = ata_id_log2_per_physical_sector(dev->id); in ata_scsiop_read_cap()
2489 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); in ata_scsiop_read_cap()
2491 if (args->cmd->cmnd[0] == READ_CAPACITY) { in ata_scsiop_read_cap()
2495 /* sector count, 32-bit */ in ata_scsiop_read_cap()
2507 /* sector count, 64-bit */ in ata_scsiop_read_cap()
2528 if (ata_id_has_trim(args->id) && in ata_scsiop_read_cap()
2529 !(dev->horkage & ATA_HORKAGE_NOTRIM)) { in ata_scsiop_read_cap()
2532 if (ata_id_has_zero_after_trim(args->id) && in ata_scsiop_read_cap()
2533 dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { in ata_scsiop_read_cap()
2538 if (ata_id_zoned_cap(args->id) || in ata_scsiop_read_cap()
2539 args->dev->class == ATA_DEV_ZAC) in ata_scsiop_read_cap()
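A hedged, standalone sketch of the two response payloads being built here (the put_be*/read_cap* helpers are illustrative names, not libata API):

#include <stdint.h>
#include <string.h>

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint8_t *p, uint64_t v)
{
	put_be32(p, v >> 32);
	put_be32(p + 4, (uint32_t)v);
}

/* READ CAPACITY(10): 32-bit last LBA (saturated) + 32-bit block size. */
static void read_cap10(uint8_t buf[8], uint64_t last_lba, uint32_t sector_size)
{
	put_be32(&buf[0], last_lba > 0xffffffffULL ? 0xffffffff : (uint32_t)last_lba);
	put_be32(&buf[4], sector_size);
}

/* READ CAPACITY(16): 64-bit last LBA + 32-bit block size, rest zeroed here. */
static void read_cap16(uint8_t buf[32], uint64_t last_lba, uint32_t sector_size)
{
	memset(buf, 0, 32);
	put_be64(&buf[0], last_lba);
	put_be32(&buf[8], sector_size);
	/* bytes 12+ carry protection/exponent/alignment/provisioning bits, omitted */
}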
2546 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2566 * 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
2584 struct scsi_cmnd *cmd = qc->scsicmd; in atapi_qc_complete()
2585 unsigned int err_mask = qc->err_mask; in atapi_qc_complete()
2588 if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) { in atapi_qc_complete()
2590 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { in atapi_qc_complete()
2592 * translation of taskfile registers into a in atapi_qc_complete()
2599 /* SCSI EH automatically locks door if sdev->locked is in atapi_qc_complete()
2602 * creates a loop - SCSI EH issues door lock which in atapi_qc_complete()
2606 * If door lock fails, always clear sdev->locked to in atapi_qc_complete()
2610 * sure qc->dev->sdev isn't NULL before dereferencing. in atapi_qc_complete()
2612 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) in atapi_qc_complete()
2613 qc->dev->sdev->locked = 0; in atapi_qc_complete()
2615 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; in atapi_qc_complete()
2621 if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0) in atapi_qc_complete()
2623 cmd->result = SAM_STAT_GOOD; in atapi_qc_complete()
2628 * atapi_xlat - Initialize PACKET taskfile
2635 * Zero on success, non-zero on failure.
2639 struct scsi_cmnd *scmd = qc->scsicmd; in atapi_xlat()
2640 struct ata_device *dev = qc->dev; in atapi_xlat()
2641 int nodata = (scmd->sc_data_direction == DMA_NONE); in atapi_xlat()
2642 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); in atapi_xlat()
2645 memset(qc->cdb, 0, dev->cdb_len); in atapi_xlat()
2646 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); in atapi_xlat()
2648 qc->complete_fn = atapi_qc_complete; in atapi_xlat()
2650 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in atapi_xlat()
2651 if (scmd->sc_data_direction == DMA_TO_DEVICE) { in atapi_xlat()
2652 qc->tf.flags |= ATA_TFLAG_WRITE; in atapi_xlat()
2655 qc->tf.command = ATA_CMD_PACKET; in atapi_xlat()
2696 qc->tf.lbam = (nbytes & 0xFF); in atapi_xlat()
2697 qc->tf.lbah = (nbytes >> 8); in atapi_xlat()
2700 qc->tf.protocol = ATAPI_PROT_NODATA; in atapi_xlat()
2702 qc->tf.protocol = ATAPI_PROT_PIO; in atapi_xlat()
2705 qc->tf.protocol = ATAPI_PROT_DMA; in atapi_xlat()
2706 qc->tf.feature |= ATAPI_PKT_DMA; in atapi_xlat()
2708 if ((dev->flags & ATA_DFLAG_DMADIR) && in atapi_xlat()
2709 (scmd->sc_data_direction != DMA_TO_DEVICE)) in atapi_xlat()
2711 qc->tf.feature |= ATAPI_DMADIR; in atapi_xlat()
2715 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE in atapi_xlat()
2723 * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), in ata_find_dev()
2731 int link_max_devices = ata_link_max_devices(&ap->link); in ata_find_dev()
2734 return &ap->link.device[0]; in ata_find_dev()
2737 return &ap->link.device[devno]; in ata_find_dev()
2743 * For PMP-attached devices, the device number corresponds to C in ata_find_dev()
2747 if (devno < ap->nr_pmp_links) in ata_find_dev()
2748 return &ap->pmp_link[devno].device[0]; in ata_find_dev()
2760 if (unlikely(scsidev->channel || scsidev->lun)) in __ata_scsi_find_dev()
2762 devno = scsidev->id; in __ata_scsi_find_dev()
2764 if (unlikely(scsidev->id || scsidev->lun)) in __ata_scsi_find_dev()
2766 devno = scsidev->channel; in __ata_scsi_find_dev()
2773 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2800 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2801 * @byte1: Byte 1 from pass-thru CDB.
2810 case 3: /* Non-data */ in ata_scsi_map_proto()
2814 case 10: /* UDMA Data-in */ in ata_scsi_map_proto()
2815 case 11: /* UDMA Data-Out */ in ata_scsi_map_proto()
2818 case 4: /* PIO Data-in */ in ata_scsi_map_proto()
2819 case 5: /* PIO Data-out */ in ata_scsi_map_proto()
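For context, the PROTOCOL value decoded here sits in bits 1..4 of byte 1 of the ATA PASS-THROUGH CDB; a minimal sketch of that decode (the enum and names are illustrative, not libata's ATA_PROT_* constants):

#include <stdint.h>

enum xfer_proto { PROTO_UNKNOWN, PROTO_NODATA, PROTO_PIO, PROTO_DMA };

static enum xfer_proto map_passthru_protocol(uint8_t byte1)
{
	switch ((byte1 >> 1) & 0x0f) {
	case 3:			/* Non-data */
		return PROTO_NODATA;
	case 4:			/* PIO Data-in */
	case 5:			/* PIO Data-out */
		return PROTO_PIO;
	case 6:			/* DMA */
	case 10:		/* UDMA Data-in */
	case 11:		/* UDMA Data-out */
		return PROTO_DMA;
	default:
		return PROTO_UNKNOWN;
	}
}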
2839 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2842 * Handles either 12, 16, or 32-byte versions of the CDB.
2845 * Zero on success, non-zero on failure.
2849 struct ata_taskfile *tf = &(qc->tf); in ata_scsi_pass_thru() local
2850 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_pass_thru()
2851 struct ata_device *dev = qc->dev; in ata_scsi_pass_thru()
2852 const u8 *cdb = scmd->cmnd; in ata_scsi_pass_thru()
2856 /* 7Fh variable length cmd means an ATA pass-thru(32) */ in ata_scsi_pass_thru()
2860 tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]); in ata_scsi_pass_thru()
2861 if (tf->protocol == ATA_PROT_UNKNOWN) { in ata_scsi_pass_thru()
2871 if (scmd->sc_data_direction != DMA_NONE) { in ata_scsi_pass_thru()
2876 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
2877 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_pass_thru()
2881 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_pass_thru()
2890 * 16-byte CDB - may contain extended commands. in ata_scsi_pass_thru()
2895 tf->hob_feature = cdb[3]; in ata_scsi_pass_thru()
2896 tf->hob_nsect = cdb[5]; in ata_scsi_pass_thru()
2897 tf->hob_lbal = cdb[7]; in ata_scsi_pass_thru()
2898 tf->hob_lbam = cdb[9]; in ata_scsi_pass_thru()
2899 tf->hob_lbah = cdb[11]; in ata_scsi_pass_thru()
2900 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2902 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2907 tf->feature = cdb[4]; in ata_scsi_pass_thru()
2908 tf->nsect = cdb[6]; in ata_scsi_pass_thru()
2909 tf->lbal = cdb[8]; in ata_scsi_pass_thru()
2910 tf->lbam = cdb[10]; in ata_scsi_pass_thru()
2911 tf->lbah = cdb[12]; in ata_scsi_pass_thru()
2912 tf->device = cdb[13]; in ata_scsi_pass_thru()
2913 tf->command = cdb[14]; in ata_scsi_pass_thru()
2917 * 12-byte CDB - incapable of extended commands. in ata_scsi_pass_thru()
2919 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2921 tf->feature = cdb[3]; in ata_scsi_pass_thru()
2922 tf->nsect = cdb[4]; in ata_scsi_pass_thru()
2923 tf->lbal = cdb[5]; in ata_scsi_pass_thru()
2924 tf->lbam = cdb[6]; in ata_scsi_pass_thru()
2925 tf->lbah = cdb[7]; in ata_scsi_pass_thru()
2926 tf->device = cdb[8]; in ata_scsi_pass_thru()
2927 tf->command = cdb[9]; in ata_scsi_pass_thru()
2931 * 32-byte CDB - may contain extended command fields. in ata_scsi_pass_thru()
2936 tf->hob_feature = cdb[20]; in ata_scsi_pass_thru()
2937 tf->hob_nsect = cdb[22]; in ata_scsi_pass_thru()
2938 tf->hob_lbal = cdb[16]; in ata_scsi_pass_thru()
2939 tf->hob_lbam = cdb[15]; in ata_scsi_pass_thru()
2940 tf->hob_lbah = cdb[14]; in ata_scsi_pass_thru()
2941 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2943 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2945 tf->feature = cdb[21]; in ata_scsi_pass_thru()
2946 tf->nsect = cdb[23]; in ata_scsi_pass_thru()
2947 tf->lbal = cdb[19]; in ata_scsi_pass_thru()
2948 tf->lbam = cdb[18]; in ata_scsi_pass_thru()
2949 tf->lbah = cdb[17]; in ata_scsi_pass_thru()
2950 tf->device = cdb[24]; in ata_scsi_pass_thru()
2951 tf->command = cdb[25]; in ata_scsi_pass_thru()
2952 tf->auxiliary = get_unaligned_be32(&cdb[28]); in ata_scsi_pass_thru()
2957 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
2958 tf->nsect = qc->hw_tag << 3; in ata_scsi_pass_thru()
2961 tf->device = dev->devno ? in ata_scsi_pass_thru()
2962 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; in ata_scsi_pass_thru()
2964 switch (tf->command) { in ata_scsi_pass_thru()
2965 /* READ/WRITE LONG use a non-standard sect_size */ in ata_scsi_pass_thru()
2970 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { in ata_scsi_pass_thru()
2974 qc->sect_size = scsi_bufflen(scmd); in ata_scsi_pass_thru()
3008 qc->sect_size = scmd->device->sector_size; in ata_scsi_pass_thru()
3013 qc->sect_size = ATA_SECT_SIZE; in ata_scsi_pass_thru()
3018 * write indication (used for PIO/DMA setup), result TF is in ata_scsi_pass_thru()
3021 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_pass_thru()
3022 if (scmd->sc_data_direction == DMA_TO_DEVICE) in ata_scsi_pass_thru()
3023 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_pass_thru()
3025 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; in ata_scsi_pass_thru()
3036 if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) { in ata_scsi_pass_thru()
3042 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { in ata_scsi_pass_thru()
3048 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { in ata_scsi_pass_thru()
3053 if (is_multi_taskfile(tf)) { in ata_scsi_pass_thru()
3059 if (multi_count != dev->multi_count) in ata_scsi_pass_thru()
3065 * Filter SET_FEATURES - XFER MODE command -- otherwise, in ata_scsi_pass_thru()
3066 * SET_FEATURES - XFER MODE must be preceded/succeeded in ata_scsi_pass_thru()
3067 * by an update to hardware-specific registers for each in ata_scsi_pass_thru()
3068 * controller (i.e. the reason for ->set_piomode(), in ata_scsi_pass_thru()
3069 * ->set_dmamode(), and ->post_set_mode() hooks). in ata_scsi_pass_thru()
3071 if (tf->command == ATA_CMD_SET_FEATURES && in ata_scsi_pass_thru()
3072 tf->feature == SETFEATURES_XFER) { in ata_scsi_pass_thru()
3081 * have a real reason for wanting to use them. This ensures in ata_scsi_pass_thru()
3086 * Note that for ATA8 we can issue a DCS change and DCS freeze lock in ata_scsi_pass_thru()
3092 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { in ata_scsi_pass_thru()
3105 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
3111 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
3128 struct scsi_device *sdp = cmd->device; in ata_format_dsm_trim_descr()
3129 size_t len = sdp->sector_size; in ata_format_dsm_trim_descr()
3149 count -= 0xffff; in ata_format_dsm_trim_descr()
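A hedged, standalone sketch of the TRIM range-entry packing described above (illustrative names; each 8-byte entry holds a 48-bit LBA plus a 16-bit sector count, little-endian, so longer ranges are split across entries):

#include <stddef.h>
#include <stdint.h>

static void put_le64(uint8_t *p, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

static int format_trim_entries(uint8_t *buf, size_t buflen, uint64_t lba, uint64_t count)
{
	int n = 0;

	while (count && (size_t)(n + 1) * 8 <= buflen) {
		uint16_t chunk = count > 0xffff ? 0xffff : (uint16_t)count;

		put_le64(buf + n * 8, (lba & 0xffffffffffffULL) | ((uint64_t)chunk << 48));
		lba += chunk;
		count -= chunk;
		n++;
	}
	return n;	/* number of 8-byte range entries written */
}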
3159 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
3162 * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
3166 * - When set translate to DSM TRIM
3167 * - When clear translate to SCT Write Same
3171 struct ata_taskfile *tf = &qc->tf; in ata_scsi_write_same_xlat() local
3172 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_write_same_xlat()
3173 struct scsi_device *sdp = scmd->device; in ata_scsi_write_same_xlat()
3174 size_t len = sdp->sector_size; in ata_scsi_write_same_xlat()
3175 struct ata_device *dev = qc->dev; in ata_scsi_write_same_xlat()
3176 const u8 *cdb = scmd->cmnd; in ata_scsi_write_same_xlat()
3197 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_write_same_xlat()
3204 (dev->horkage & ATA_HORKAGE_NOTRIM) || in ata_scsi_write_same_xlat()
3205 !ata_id_has_trim(dev->id)) { in ata_scsi_write_same_xlat()
3217 * WRITE SAME always has a sector sized buffer as payload, this in ata_scsi_write_same_xlat()
3218 * should never be a multiple entry S/G list. in ata_scsi_write_same_xlat()
3225 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) in ata_scsi_write_same_xlat()
3235 tf->protocol = ATA_PROT_NCQ; in ata_scsi_write_same_xlat()
3236 tf->command = ATA_CMD_FPDMA_SEND; in ata_scsi_write_same_xlat()
3237 tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; in ata_scsi_write_same_xlat()
3238 tf->nsect = qc->hw_tag << 3; in ata_scsi_write_same_xlat()
3239 tf->hob_feature = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3240 tf->feature = size / 512; in ata_scsi_write_same_xlat()
3242 tf->auxiliary = 1; in ata_scsi_write_same_xlat()
3244 tf->protocol = ATA_PROT_DMA; in ata_scsi_write_same_xlat()
3245 tf->hob_feature = 0; in ata_scsi_write_same_xlat()
3246 tf->feature = ATA_DSM_TRIM; in ata_scsi_write_same_xlat()
3247 tf->hob_nsect = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3248 tf->nsect = size / 512; in ata_scsi_write_same_xlat()
3249 tf->command = ATA_CMD_DSM; in ata_scsi_write_same_xlat()
3252 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | in ata_scsi_write_same_xlat()
3273 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
3277 * Yields a subset to satisfy scsi_report_opcode()
3284 struct ata_device *dev = args->dev; in ata_scsiop_maint_in()
3285 u8 *cdb = args->cmd->cmnd; in ata_scsiop_maint_in()
3326 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3337 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3348 if (ata_id_zoned_cap(dev->id) || in ata_scsiop_maint_in()
3349 dev->class == ATA_DEV_ZAC) in ata_scsiop_maint_in()
3354 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsiop_maint_in()
3368 * ata_scsi_report_zones_complete - convert ATA output
3371 * Convert T-13 little-endian field representation into
3372 * T-10 big-endian field representation.
3373 * What a mess.
3377 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_report_zones_complete()
3440 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_in_xlat() local
3441 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_in_xlat()
3442 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_in_xlat()
3443 u16 sect, fp = (u16)-1; in ata_scsi_zbc_in_xlat()
3448 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_in_xlat()
3449 ata_dev_warn(qc->dev, "invalid cdb length %d\n", in ata_scsi_zbc_in_xlat()
3450 scmd->cmd_len); in ata_scsi_zbc_in_xlat()
3456 ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", in ata_scsi_zbc_in_xlat()
3462 ata_dev_warn(qc->dev, "invalid service action %d\n", sa); in ata_scsi_zbc_in_xlat()
3468 * and uses a 16 bit value for the transfer count. in ata_scsi_zbc_in_xlat()
3471 ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); in ata_scsi_zbc_in_xlat()
3477 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_in_xlat()
3478 ata_fpdma_zac_mgmt_in_supported(qc->dev)) { in ata_scsi_zbc_in_xlat()
3479 tf->protocol = ATA_PROT_NCQ; in ata_scsi_zbc_in_xlat()
3480 tf->command = ATA_CMD_FPDMA_RECV; in ata_scsi_zbc_in_xlat()
3481 tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; in ata_scsi_zbc_in_xlat()
3482 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_in_xlat()
3483 tf->feature = sect & 0xff; in ata_scsi_zbc_in_xlat()
3484 tf->hob_feature = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3485 tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); in ata_scsi_zbc_in_xlat()
3487 tf->command = ATA_CMD_ZAC_MGMT_IN; in ata_scsi_zbc_in_xlat()
3488 tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; in ata_scsi_zbc_in_xlat()
3489 tf->protocol = ATA_PROT_DMA; in ata_scsi_zbc_in_xlat()
3490 tf->hob_feature = options; in ata_scsi_zbc_in_xlat()
3491 tf->hob_nsect = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3492 tf->nsect = sect & 0xff; in ata_scsi_zbc_in_xlat()
3494 tf->device = ATA_LBA; in ata_scsi_zbc_in_xlat()
3495 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_in_xlat()
3496 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3497 tf->lbal = block & 0xff; in ata_scsi_zbc_in_xlat()
3498 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_in_xlat()
3499 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_in_xlat()
3500 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_in_xlat()
3502 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_in_xlat()
3503 qc->flags |= ATA_QCFLAG_RESULT_TF; in ata_scsi_zbc_in_xlat()
3507 qc->complete_fn = ata_scsi_report_zones_complete; in ata_scsi_zbc_in_xlat()
3512 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_zbc_in_xlat()
3517 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_in_xlat()
3523 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_out_xlat() local
3524 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_out_xlat()
3525 struct ata_device *dev = qc->dev; in ata_scsi_zbc_out_xlat()
3526 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_out_xlat()
3530 u16 fp = (u16)-1; in ata_scsi_zbc_out_xlat()
3532 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_out_xlat()
3558 } else if (block >= dev->n_sectors) { in ata_scsi_zbc_out_xlat()
3560 * Block must be a valid zone ID (a zone start LBA). in ata_scsi_zbc_out_xlat()
3566 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_out_xlat()
3567 ata_fpdma_zac_mgmt_out_supported(qc->dev)) { in ata_scsi_zbc_out_xlat()
3568 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_zbc_out_xlat()
3569 tf->command = ATA_CMD_NCQ_NON_DATA; in ata_scsi_zbc_out_xlat()
3570 tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3571 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_out_xlat()
3572 tf->auxiliary = sa | ((u16)all << 8); in ata_scsi_zbc_out_xlat()
3574 tf->protocol = ATA_PROT_NODATA; in ata_scsi_zbc_out_xlat()
3575 tf->command = ATA_CMD_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3576 tf->feature = sa; in ata_scsi_zbc_out_xlat()
3577 tf->hob_feature = all; in ata_scsi_zbc_out_xlat()
3579 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_out_xlat()
3580 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_out_xlat()
3581 tf->lbal = block & 0xff; in ata_scsi_zbc_out_xlat()
3582 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_out_xlat()
3583 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_out_xlat()
3584 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_out_xlat()
3585 tf->device = ATA_LBA; in ata_scsi_zbc_out_xlat()
3586 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_out_xlat()
3591 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_zbc_out_xlat()
3595 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_out_xlat()
3600 * ata_mselect_caching - Simulate MODE SELECT for caching info page
3606 * Prepare a taskfile to modify caching information for the device.
3614 struct ata_taskfile *tf = &qc->tf; in ata_mselect_caching() local
3615 struct ata_device *dev = qc->dev; in ata_mselect_caching()
3621 * The first two bytes of def_cache_mpage are a header, so offsets in ata_mselect_caching()
3625 if (len != CACHE_MPAGE_LEN - 2) { in ata_mselect_caching()
3626 *fp = min(len, CACHE_MPAGE_LEN - 2); in ata_mselect_caching()
3627 return -EINVAL; in ata_mselect_caching()
3633 * Check that read-only bits are not modified. in ata_mselect_caching()
3635 ata_msense_caching(dev->id, mpage, false); in ata_mselect_caching()
3636 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { in ata_mselect_caching()
3641 return -EINVAL; in ata_mselect_caching()
3645 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_caching()
3646 tf->protocol = ATA_PROT_NODATA; in ata_mselect_caching()
3647 tf->nsect = 0; in ata_mselect_caching()
3648 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_caching()
3649 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; in ata_mselect_caching()
3654 * Simulate MODE SELECT control mode page, sub-page 0.
3659 struct ata_device *dev = qc->dev; in ata_mselect_control_spg0()
3665 * The first two bytes of def_control_mpage are a header, so offsets in ata_mselect_control_spg0()
3669 if (len != CONTROL_MPAGE_LEN - 2) { in ata_mselect_control_spg0()
3670 *fp = min(len, CONTROL_MPAGE_LEN - 2); in ata_mselect_control_spg0()
3671 return -EINVAL; in ata_mselect_control_spg0()
3677 * Check that read-only bits are not modified. in ata_mselect_control_spg0()
3680 for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { in ata_mselect_control_spg0()
3685 return -EINVAL; in ata_mselect_control_spg0()
3689 dev->flags |= ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3691 dev->flags &= ~ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3696 * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
3697 * page) into a SET FEATURES command.
3703 struct ata_device *dev = qc->dev; in ata_mselect_control_ata_feature()
3704 struct ata_taskfile *tf = &qc->tf; in ata_mselect_control_ata_feature() local
3708 * The first four bytes of the ATA Feature Control mode page are a header, in ata_mselect_control_ata_feature()
3711 if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) { in ata_mselect_control_ata_feature()
3712 *fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4); in ata_mselect_control_ata_feature()
3713 return -EINVAL; in ata_mselect_control_ata_feature()
3721 dev->flags &= ~ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3725 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { in ata_mselect_control_ata_feature()
3728 return -EINVAL; in ata_mselect_control_ata_feature()
3731 dev->flags |= ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3735 return -EINVAL; in ata_mselect_control_ata_feature()
3738 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_control_ata_feature()
3739 tf->protocol = ATA_PROT_NODATA; in ata_mselect_control_ata_feature()
3740 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_control_ata_feature()
3741 tf->feature = SETFEATURES_CDL; in ata_mselect_control_ata_feature()
3742 tf->nsect = cdl_action; in ata_mselect_control_ata_feature()
3748 * ata_mselect_control - Simulate MODE SELECT for control page
3750 * @spg: target sub-page of the control page
3755 * Prepare a taskfile to modify control mode page settings for the device.
3769 return -EINVAL; in ata_mselect_control()
3774 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
3777 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
3786 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_mode_select_xlat()
3787 const u8 *cdb = scmd->cmnd; in ata_scsi_mode_select_xlat()
3791 u16 fp = (u16)-1; in ata_scsi_mode_select_xlat()
3798 if (scmd->cmd_len < 5) { in ata_scsi_mode_select_xlat()
3806 if (scmd->cmd_len < 9) { in ata_scsi_mode_select_xlat()
3823 if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) in ata_scsi_mode_select_xlat()
3839 len -= hdr_len; in ata_scsi_mode_select_xlat()
3849 len -= bd_len; in ata_scsi_mode_select_xlat()
3863 len -= 4; in ata_scsi_mode_select_xlat()
3871 len -= 2; in ata_scsi_mode_select_xlat()
3875 * Supported subpages: all subpages and ATA feature sub-page f2h of in ata_scsi_mode_select_xlat()
3889 if (qc->dev->flags & ATA_DFLAG_CDL && in ata_scsi_mode_select_xlat()
3926 * page at a time. in ata_scsi_mode_select_xlat()
3934 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_mode_select_xlat()
3938 ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); in ata_scsi_mode_select_xlat()
3943 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_mode_select_xlat()
3947 scmd->result = SAM_STAT_GOOD; in ata_scsi_mode_select_xlat()
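The header and block-descriptor skipping above follows the standard MODE SELECT layouts; a minimal sketch of where the first mode page starts (illustrative helper, assuming SPC's 4-byte header for MODE SELECT(6) and 8-byte header for MODE SELECT(10)):

#include <stddef.h>
#include <stdint.h>

static int mode_select_data_offset(int ten_byte, const uint8_t *buf, size_t len)
{
	size_t hdr_len = ten_byte ? 8 : 4;
	size_t bd_len;

	if (len < hdr_len)
		return -1;
	/* block descriptor length: byte 3 (6-byte CDB) or bytes 6..7 (10-byte CDB) */
	bd_len = ten_byte ? (((size_t)buf[6] << 8) | buf[7]) : buf[3];
	if (len < hdr_len + bd_len)
		return -1;
	return (int)(hdr_len + bd_len);	/* offset of the first mode page */
}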
3963 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_security_inout_xlat()
3964 const u8 *cdb = scmd->cmnd; in ata_scsi_security_inout_xlat()
3965 struct ata_taskfile *tf = &qc->tf; in ata_scsi_security_inout_xlat() local
3970 bool dma = !(qc->dev->flags & ATA_DFLAG_PIO); in ata_scsi_security_inout_xlat()
3976 ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0); in ata_scsi_security_inout_xlat()
3982 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
3987 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
3991 /* convert to the sector-based ATA addressing */ in ata_scsi_security_inout_xlat()
3995 tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO; in ata_scsi_security_inout_xlat()
3996 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA; in ata_scsi_security_inout_xlat()
3998 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_security_inout_xlat()
3999 tf->command = ata_scsi_trusted_op(len, send, dma); in ata_scsi_security_inout_xlat()
4000 tf->feature = secp; in ata_scsi_security_inout_xlat()
4001 tf->lbam = spsp & 0xff; in ata_scsi_security_inout_xlat()
4002 tf->lbah = spsp >> 8; in ata_scsi_security_inout_xlat()
4005 tf->nsect = len & 0xff; in ata_scsi_security_inout_xlat()
4006 tf->lbal = len >> 8; in ata_scsi_security_inout_xlat()
4009 tf->lbah = (1 << 7); in ata_scsi_security_inout_xlat()
4017 * ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler
4020 * Translate a SCSI variable length CDB to specified commands.
4021 * It checks a service action value in CDB to call corresponding handler.
4024 * Zero on success, non-zero on failure
4029 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_var_len_cdb_xlat()
4030 const u8 *cdb = scmd->cmnd; in ata_scsi_var_len_cdb_xlat()
4034 * if service action represents an ATA pass-thru(32) command, in ata_scsi_var_len_cdb_xlat()
4045 * ata_get_xlat_func - check if SCSI to ATA translation is possible
4100 if (!(dev->flags & ATA_DFLAG_TRUSTED)) in ata_get_xlat_func()
4113 struct ata_port *ap = dev->link->ap; in __ata_scsi_queuecmd()
4114 u8 scsi_op = scmd->cmnd[0]; in __ata_scsi_queuecmd()
4119 * However, this check is done without holding the ap->lock (a libata in __ata_scsi_queuecmd()
4121 * therefore we must check if EH is pending, while holding ap->lock. in __ata_scsi_queuecmd()
4123 if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) in __ata_scsi_queuecmd()
4126 if (unlikely(!scmd->cmd_len)) in __ata_scsi_queuecmd()
4129 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { in __ata_scsi_queuecmd()
4130 if (unlikely(scmd->cmd_len > dev->cdb_len)) in __ata_scsi_queuecmd()
4138 if (unlikely(len > scmd->cmd_len || in __ata_scsi_queuecmd()
4139 len > dev->cdb_len || in __ata_scsi_queuecmd()
4140 scmd->cmd_len > ATAPI_CDB_LEN)) in __ata_scsi_queuecmd()
4146 if (unlikely(scmd->cmd_len > 16)) in __ata_scsi_queuecmd()
4160 scmd->result = DID_ERROR << 16; in __ata_scsi_queuecmd()
4166 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
4172 * hardware. In other cases, this function simulates a
4188 struct scsi_device *scsidev = cmd->device; in ata_scsi_queuecmd()
4194 spin_lock_irqsave(ap->lock, irq_flags); in ata_scsi_queuecmd()
4200 cmd->result = (DID_BAD_TARGET << 16); in ata_scsi_queuecmd()
4204 spin_unlock_irqrestore(ap->lock, irq_flags); in ata_scsi_queuecmd()
4211 * ata_scsi_simulate - simulate SCSI command on ATA device
4215 * Interprets and directly executes a select list of SCSI commands
4225 const u8 *scsicmd = cmd->cmnd; in ata_scsi_simulate()
4229 args.id = dev->id; in ata_scsi_simulate()
4261 if (dev->flags & ATA_DFLAG_ZAC) in ata_scsi_simulate()
4267 if (dev->cpr_log) in ata_scsi_simulate()
4303 * turning this into a no-op. in ata_scsi_simulate()
4309 /* no-op's, complete with success */ in ata_scsi_simulate()
4343 for (i = 0; i < host->n_ports; i++) { in ata_scsi_add_hosts()
4344 struct ata_port *ap = host->ports[i]; in ata_scsi_add_hosts()
4347 rc = -ENOMEM; in ata_scsi_add_hosts()
4352 shost->eh_noresume = 1; in ata_scsi_add_hosts()
4353 *(struct ata_port **)&shost->hostdata[0] = ap; in ata_scsi_add_hosts()
4354 ap->scsi_host = shost; in ata_scsi_add_hosts()
4356 shost->transportt = ata_scsi_transport_template; in ata_scsi_add_hosts()
4357 shost->unique_id = ap->print_id; in ata_scsi_add_hosts()
4358 shost->max_id = 16; in ata_scsi_add_hosts()
4359 shost->max_lun = 1; in ata_scsi_add_hosts()
4360 shost->max_channel = 1; in ata_scsi_add_hosts()
4361 shost->max_cmd_len = 32; in ata_scsi_add_hosts()
4363 /* Schedule policy is determined by ->qc_defer() in ata_scsi_add_hosts()
4368 shost->max_host_blocked = 1; in ata_scsi_add_hosts()
4370 rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); in ata_scsi_add_hosts()
4378 while (--i >= 0) { in ata_scsi_add_hosts()
4379 struct Scsi_Host *shost = host->ports[i]->scsi_host; in ata_scsi_add_hosts()
4390 struct scsi_device *sdev = dev->sdev; in ata_scsi_assign_ofnode()
4391 struct device *d = ap->host->dev; in ata_scsi_assign_ofnode()
4392 struct device_node *np = d->of_node; in ata_scsi_assign_ofnode()
4402 if (val == dev->devno) { in ata_scsi_assign_ofnode()
4404 sdev->sdev_gendev.of_node = child; in ata_scsi_assign_ofnode()
4428 if (dev->sdev) in ata_scsi_scan_host()
4432 id = dev->devno; in ata_scsi_scan_host()
4434 channel = link->pmp; in ata_scsi_scan_host()
4436 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, in ata_scsi_scan_host()
4439 dev->sdev = sdev; in ata_scsi_scan_host()
4443 dev->sdev = NULL; in ata_scsi_scan_host()
4454 if (!dev->sdev) in ata_scsi_scan_host()
4474 * a few more chances. in ata_scsi_scan_host()
4476 if (--tries) { in ata_scsi_scan_host()
4485 queue_delayed_work(system_long_wq, &ap->hotplug_task, in ata_scsi_scan_host()
4490 * ata_scsi_offline_dev - offline attached SCSI device
4495 * function is called with host lock which protects dev->sdev
4506 if (dev->sdev) { in ata_scsi_offline_dev()
4507 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); in ata_scsi_offline_dev()
4514 * ata_scsi_remove_dev - remove attached SCSI device
4525 struct ata_port *ap = dev->link->ap; in ata_scsi_remove_dev()
4535 mutex_lock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4536 spin_lock_irqsave(ap->lock, flags); in ata_scsi_remove_dev()
4538 /* clearing dev->sdev is protected by host lock */ in ata_scsi_remove_dev()
4539 sdev = dev->sdev; in ata_scsi_remove_dev()
4540 dev->sdev = NULL; in ata_scsi_remove_dev()
4560 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_remove_dev()
4561 mutex_unlock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4565 dev_name(&sdev->sdev_gendev)); in ata_scsi_remove_dev()
4574 struct ata_port *ap = link->ap; in ata_scsi_handle_link_detach()
4580 if (!(dev->flags & ATA_DFLAG_DETACHED)) in ata_scsi_handle_link_detach()
4583 spin_lock_irqsave(ap->lock, flags); in ata_scsi_handle_link_detach()
4584 dev->flags &= ~ATA_DFLAG_DETACHED; in ata_scsi_handle_link_detach()
4585 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_handle_link_detach()
4595 * ata_scsi_media_change_notify - send media change event
4598 * Tell the block layer to send a media change notification
4606 if (dev->sdev) in ata_scsi_media_change_notify()
4607 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, in ata_scsi_media_change_notify()
4612 * ata_scsi_hotplug - SCSI part of hotplug
4615 * Perform SCSI part of hotplug. It's executed from a separate
4618 * synchronized with hot plugging with a mutex.
4629 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_scsi_hotplug()
4632 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4638 ata_scsi_handle_link_detach(&ap->link); in ata_scsi_hotplug()
4639 if (ap->pmp_link) in ata_scsi_hotplug()
4641 ata_scsi_handle_link_detach(&ap->pmp_link[i]); in ata_scsi_hotplug()
4646 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4650 * ata_scsi_user_scan - indication for user-initiated bus scan
4673 return -EINVAL; in ata_scsi_user_scan()
4677 return -EINVAL; in ata_scsi_user_scan()
4681 return -EINVAL; in ata_scsi_user_scan()
4685 spin_lock_irqsave(ap->lock, flags); in ata_scsi_user_scan()
4691 struct ata_eh_info *ehi = &link->eh_info; in ata_scsi_user_scan()
4692 ehi->probe_mask |= ATA_ALL_DEVICES; in ata_scsi_user_scan()
4693 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4699 struct ata_eh_info *ehi = &dev->link->eh_info; in ata_scsi_user_scan()
4700 ehi->probe_mask |= 1 << dev->devno; in ata_scsi_user_scan()
4701 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4703 rc = -EINVAL; in ata_scsi_user_scan()
4708 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4711 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4717 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
4735 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4736 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4740 struct scsi_device *sdev = dev->sdev; in ata_scsi_dev_rescan()
4746 if (ap->pflags & ATA_PFLAG_SUSPENDED) in ata_scsi_dev_rescan()
4754 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4757 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4765 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4766 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4768 /* Reschedule with a delay if scsi_rescan_device() returned an error */ in ata_scsi_dev_rescan()
4770 schedule_delayed_work(&ap->scsi_rescan_task, in ata_scsi_dev_rescan()