Lines matching "retry", "all" and "errors" in libata-eh.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-eh.c - libata error handling
8 * as Documentation/driver-api/libata.rst
11 * http://www.sata-io.org/
59 /* Waiting in ->prereset can never be reliable. It's
76 * hardreset. All others are hardreset if available. In most cases
97 15000, /* Some drives are slow to read log pages when waking up */
125 * On the retry after a command timed out, the next timeout value from
129 * ehc->cmd_timeout_idx keeps track of which timeout to use per
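/*
 * Illustrative sketch (not the kernel code) of the escalating timeout
 * scheme described above: each device keeps an index into a timeout
 * table, the index selects the timeout for the next attempt, and a
 * timeout bumps the index so the following retry waits longer. All
 * demo_* names and the values are invented for this example.
 */
#include <stddef.h>

static const unsigned long demo_timeouts_ms[] = { 5000, 10000, 30000 };
#define DEMO_NR_TIMEOUTS \
	(sizeof(demo_timeouts_ms) / sizeof(demo_timeouts_ms[0]))

struct demo_dev_eh {
	size_t timeout_idx;	/* entry to use for the next attempt */
};

/* Timeout to apply to the next try of an internal command. */
static unsigned long demo_cmd_timeout(const struct demo_dev_eh *eh)
{
	return demo_timeouts_ms[eh->timeout_idx];
}

/* Called when the command timed out: advance to the next, longer value. */
static void demo_cmd_timed_out(struct demo_dev_eh *eh)
{
	if (eh->timeout_idx + 1 < DEMO_NR_TIMEOUTS)
		eh->timeout_idx++;
}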
172 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, in __ata_ehi_pushv_desc()
173 ATA_EH_DESC_LEN - ehi->desc_len, in __ata_ehi_pushv_desc()
178 * __ata_ehi_push_desc - push error description without adding separator
182 * Format string according to @fmt and append it to @ehi->desc.
198 * ata_ehi_push_desc - push error description with separator
202 * Format string according to @fmt and append it to @ehi->desc.
203 * If @ehi->desc is not empty, ", " is added in-between.
212 if (ehi->desc_len) in ata_ehi_push_desc()
222 * ata_ehi_clear_desc - clean error description
225 * Clear @ehi->desc.
232 ehi->desc[0] = '\0'; in ata_ehi_clear_desc()
233 ehi->desc_len = 0; in ata_ehi_clear_desc()
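/*
 * Toy model (not the kernel API) of the description helpers above:
 * append text with a ", " separator only when the buffer already holds
 * something, and clear by emptying the buffer. The buffer size and all
 * demo_* names are invented for this example.
 */
#include <stdio.h>

#define DEMO_DESC_LEN 128

struct demo_ehi {
	char desc[DEMO_DESC_LEN];
	int desc_len;
};

static void demo_push_desc(struct demo_ehi *ehi, const char *text)
{
	int room = DEMO_DESC_LEN - ehi->desc_len;
	int n;

	if (room <= 1)
		return;				/* buffer full, drop it */
	n = snprintf(ehi->desc + ehi->desc_len, room, "%s%s",
		     ehi->desc_len ? ", " : "", text);
	if (n < 0)
		return;				/* encoding error */
	if (n >= room)
		n = room - 1;			/* output was truncated */
	ehi->desc_len += n;
}

static void demo_clear_desc(struct demo_ehi *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}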
238 * ata_port_desc - append port description
244 * in-between. This function is to be used while initializing
254 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); in ata_port_desc()
256 if (ap->link.eh_info.desc_len) in ata_port_desc()
257 __ata_ehi_push_desc(&ap->link.eh_info, " "); in ata_port_desc()
260 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); in ata_port_desc()
267 * ata_port_pbar_desc - append PCI BAR description
284 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in ata_port_pbar_desc()
317 return -1; in ata_lookup_timeout_table()
321 * ata_internal_cmd_timeout - determine timeout for an internal command
335 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timeout()
342 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timeout()
347 * ata_internal_cmd_timed_out - notification for internal command timeout
360 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timed_out()
367 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timed_out()
369 ehc->cmd_timeout_idx[dev->devno][ent]++; in ata_internal_cmd_timed_out()
379 ering->cursor++; in ata_ering_record()
380 ering->cursor %= ATA_ERING_SIZE; in ata_ering_record()
382 ent = &ering->ring[ering->cursor]; in ata_ering_record()
383 ent->eflags = eflags; in ata_ering_record()
384 ent->err_mask = err_mask; in ata_ering_record()
385 ent->timestamp = get_jiffies_64(); in ata_ering_record()
390 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; in ata_ering_top()
392 if (ent->err_mask) in ata_ering_top()
404 idx = ering->cursor; in ata_ering_map()
406 ent = &ering->ring[idx]; in ata_ering_map()
407 if (!ent->err_mask) in ata_ering_map()
412 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; in ata_ering_map()
413 } while (idx != ering->cursor); in ata_ering_map()
420 ent->eflags |= ATA_EFLAG_OLD_ER; in ata_ering_clear_cb()
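/*
 * Simplified sketch of the error ring used above: a fixed-size ring of
 * recent error records, written at an advancing cursor and walked
 * newest-first until an empty slot or a full lap. demo_* names are
 * invented; this is not the libata structure itself.
 */
#define DEMO_ERING_SIZE 8

struct demo_ering_ent {
	unsigned int err_mask;		/* 0 means the slot is unused */
};

struct demo_ering {
	int cursor;
	struct demo_ering_ent ring[DEMO_ERING_SIZE];
};

static void demo_ering_record(struct demo_ering *e, unsigned int err_mask)
{
	e->cursor = (e->cursor + 1) % DEMO_ERING_SIZE;
	e->ring[e->cursor].err_mask = err_mask;
}

/* Visit entries newest-first; stop at the first unused slot. */
static void demo_ering_map(struct demo_ering *e,
			   void (*fn)(struct demo_ering_ent *ent))
{
	int idx = e->cursor;

	do {
		if (!e->ring[idx].err_mask)
			break;
		fn(&e->ring[idx]);
		idx = (idx - 1 + DEMO_ERING_SIZE) % DEMO_ERING_SIZE;
	} while (idx != e->cursor);
}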
431 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_dev_action()
433 return ehc->i.action | ehc->i.dev_action[dev->devno]; in ata_eh_dev_action()
442 ehi->action &= ~action; in ata_eh_clear_action()
443 ata_for_each_dev(tdev, link, ALL) in ata_eh_clear_action()
444 ehi->dev_action[tdev->devno] &= ~action; in ata_eh_clear_action()
446 /* doesn't make sense for port-wide EH actions */ in ata_eh_clear_action()
449 /* break ehi->action into ehi->dev_action */ in ata_eh_clear_action()
450 if (ehi->action & action) { in ata_eh_clear_action()
451 ata_for_each_dev(tdev, link, ALL) in ata_eh_clear_action()
452 ehi->dev_action[tdev->devno] |= in ata_eh_clear_action()
453 ehi->action & action; in ata_eh_clear_action()
454 ehi->action &= ~action; in ata_eh_clear_action()
457 /* turn off the specified per-dev action */ in ata_eh_clear_action()
458 ehi->dev_action[dev->devno] &= ~action; in ata_eh_clear_action()
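/*
 * Sketch of the split performed in ata_eh_clear_action() above: when a
 * per-device action is cleared for one device, any matching link-wide
 * bits are first pushed down to every device so the others keep them.
 * Names and the two-device assumption are illustrative only.
 */
#define DEMO_NR_DEVS 2

struct demo_eh_info {
	unsigned int action;			/* link-wide actions */
	unsigned int dev_action[DEMO_NR_DEVS];	/* per-device actions */
};

static void demo_clear_dev_action(struct demo_eh_info *ehi, int devno,
				  unsigned int action)
{
	int i;

	if (ehi->action & action) {
		for (i = 0; i < DEMO_NR_DEVS; i++)
			ehi->dev_action[i] |= ehi->action & action;
		ehi->action &= ~action;
	}
	ehi->dev_action[devno] &= ~action;
}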
463 * ata_eh_acquire - acquire EH ownership
475 mutex_lock(&ap->host->eh_mutex); in ata_eh_acquire()
476 WARN_ON_ONCE(ap->host->eh_owner); in ata_eh_acquire()
477 ap->host->eh_owner = current; in ata_eh_acquire()
481 * ata_eh_release - release EH ownership
492 WARN_ON_ONCE(ap->host->eh_owner != current); in ata_eh_release()
493 ap->host->eh_owner = NULL; in ata_eh_release()
494 mutex_unlock(&ap->host->eh_mutex); in ata_eh_release()
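/*
 * User-space analogue of the acquire/release pair above: EH ownership
 * is a mutex plus a recorded owner, asserted on release. The pthread
 * usage and demo_* names are illustrative, not the kernel mechanism.
 */
#include <assert.h>
#include <pthread.h>

struct demo_host {
	pthread_mutex_t eh_mutex;
	pthread_t eh_owner;
	int eh_owned;
};

static void demo_eh_acquire(struct demo_host *host)
{
	pthread_mutex_lock(&host->eh_mutex);
	assert(!host->eh_owned);	/* nobody may already own EH */
	host->eh_owner = pthread_self();
	host->eh_owned = 1;
}

static void demo_eh_release(struct demo_host *host)
{
	assert(host->eh_owned &&
	       pthread_equal(host->eh_owner, pthread_self()));
	host->eh_owned = 0;
	pthread_mutex_unlock(&host->eh_mutex);
}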
504 * Unless we are restarting, transition all enabled devices to in ata_eh_unload()
519 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); in ata_eh_unload()
520 ata_for_each_dev(dev, link, ALL) in ata_eh_unload()
525 spin_lock_irqsave(ap->lock, flags); in ata_eh_unload()
528 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ in ata_eh_unload()
529 ap->pflags |= ATA_PFLAG_UNLOADED; in ata_eh_unload()
531 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_unload()
535 * ata_scsi_error - SCSI layer error handler callback
538 * Handles SCSI-layer-thrown error events.
552 spin_lock_irqsave(host->host_lock, flags); in ata_scsi_error()
553 list_splice_init(&host->eh_cmd_q, &eh_work_q); in ata_scsi_error()
554 spin_unlock_irqrestore(host->host_lock, flags); in ata_scsi_error()
562 /* finish or retry handled scmd's and clean up */ in ata_scsi_error()
568 * ata_scsi_cmd_error_handler - error callback for a list of commands
574 * ap->eh_done_q. This function is the first part of the libata error
591 * For EH, all qcs are finished in one of three ways - in ata_scsi_cmd_error_handler()
603 spin_lock_irqsave(ap->lock, flags); in ata_scsi_cmd_error_handler()
606 * This must occur under the ap->lock as we don't want in ata_scsi_cmd_error_handler()
610 * non-notified command and completes much like an IRQ handler. in ata_scsi_cmd_error_handler()
615 if (ap->ops->lost_interrupt) in ata_scsi_cmd_error_handler()
616 ap->ops->lost_interrupt(ap); in ata_scsi_cmd_error_handler()
622 if (qc->flags & ATA_QCFLAG_ACTIVE && in ata_scsi_cmd_error_handler()
623 qc->scsicmd == scmd) in ata_scsi_cmd_error_handler()
629 if (!(qc->flags & ATA_QCFLAG_EH)) { in ata_scsi_cmd_error_handler()
631 qc->err_mask |= AC_ERR_TIMEOUT; in ata_scsi_cmd_error_handler()
632 qc->flags |= ATA_QCFLAG_EH; in ata_scsi_cmd_error_handler()
640 scmd->retries = scmd->allowed; in ata_scsi_cmd_error_handler()
641 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in ata_scsi_cmd_error_handler()
656 ap->eh_tries = ATA_EH_MAX_TRIES; in ata_scsi_cmd_error_handler()
658 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_cmd_error_handler()
663 * ata_scsi_port_error_handler - recover the port after the commands
667 * Handle the recovery of the port @ap after all the commands
679 del_timer_sync(&ap->fastdrain_timer); in ata_scsi_port_error_handler()
685 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
688 struct ata_eh_context *ehc = &link->eh_context; in ata_scsi_port_error_handler()
691 memset(&link->eh_context, 0, sizeof(link->eh_context)); in ata_scsi_port_error_handler()
692 link->eh_context.i = link->eh_info; in ata_scsi_port_error_handler()
693 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
696 int devno = dev->devno; in ata_scsi_port_error_handler()
698 ehc->saved_xfer_mode[devno] = dev->xfer_mode; in ata_scsi_port_error_handler()
700 ehc->saved_ncq_enabled |= 1 << devno; in ata_scsi_port_error_handler()
703 if (ap->pflags & ATA_PFLAG_RESUMING) in ata_scsi_port_error_handler()
704 ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE; in ata_scsi_port_error_handler()
708 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
709 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
710 ap->excl_link = NULL; /* don't maintain exclusion over EH */ in ata_scsi_port_error_handler()
712 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
715 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) in ata_scsi_port_error_handler()
716 ap->ops->error_handler(ap); in ata_scsi_port_error_handler()
719 if ((ap->pflags & ATA_PFLAG_UNLOADING) && in ata_scsi_port_error_handler()
720 !(ap->pflags & ATA_PFLAG_UNLOADED)) in ata_scsi_port_error_handler()
729 * Exception might have happened after ->error_handler recovered the in ata_scsi_port_error_handler()
732 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
734 if (ap->pflags & ATA_PFLAG_EH_PENDING) { in ata_scsi_port_error_handler()
735 if (--ap->eh_tries) { in ata_scsi_port_error_handler()
736 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
742 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
747 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
750 * end eh (clear host_eh_scheduled) while holding ap->lock such that if in ata_scsi_port_error_handler()
752 * midlayer will re-initiate EH. in ata_scsi_port_error_handler()
754 ap->ops->end_eh(ap); in ata_scsi_port_error_handler()
756 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
759 scsi_eh_flush_done_q(&ap->eh_done_q); in ata_scsi_port_error_handler()
762 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
764 ap->pflags &= ~ATA_PFLAG_RESUMING; in ata_scsi_port_error_handler()
766 if (ap->pflags & ATA_PFLAG_LOADING) in ata_scsi_port_error_handler()
767 ap->pflags &= ~ATA_PFLAG_LOADING; in ata_scsi_port_error_handler()
768 else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && in ata_scsi_port_error_handler()
769 !(ap->flags & ATA_FLAG_SAS_HOST)) in ata_scsi_port_error_handler()
770 schedule_delayed_work(&ap->hotplug_task, 0); in ata_scsi_port_error_handler()
772 if (ap->pflags & ATA_PFLAG_RECOVERED) in ata_scsi_port_error_handler()
775 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); in ata_scsi_port_error_handler()
778 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
779 wake_up_all(&ap->eh_wait_q); in ata_scsi_port_error_handler()
781 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
786 * ata_port_wait_eh - Wait for the currently pending EH to complete
799 retry: in ata_port_wait_eh()
800 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
802 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { in ata_port_wait_eh()
803 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); in ata_port_wait_eh()
804 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
806 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
808 finish_wait(&ap->eh_wait_q, &wait); in ata_port_wait_eh()
810 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
813 if (scsi_host_in_recovery(ap->scsi_host)) { in ata_port_wait_eh()
815 goto retry; in ata_port_wait_eh()
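/*
 * Condition-variable analogue of the wait loop above: sleep until both
 * the "EH pending" and "EH in progress" flags are clear, re-checking
 * under the lock after every wakeup. All names here are illustrative.
 */
#include <pthread.h>

#define DEMO_EH_PENDING		0x1
#define DEMO_EH_IN_PROGRESS	0x2

struct demo_port_wait {
	pthread_mutex_t lock;
	pthread_cond_t eh_wait_q;
	unsigned int pflags;
};

static void demo_port_wait_eh(struct demo_port_wait *w)
{
	pthread_mutex_lock(&w->lock);
	while (w->pflags & (DEMO_EH_PENDING | DEMO_EH_IN_PROGRESS))
		pthread_cond_wait(&w->eh_wait_q, &w->lock);
	pthread_mutex_unlock(&w->lock);
}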
826 /* count only non-internal commands */ in ata_eh_nr_in_flight()
841 spin_lock_irqsave(ap->lock, flags); in ata_eh_fastdrain_timerfn()
849 if (cnt == ap->fastdrain_cnt) { in ata_eh_fastdrain_timerfn()
853 /* No progress during the last interval, tag all in ata_eh_fastdrain_timerfn()
854 * in-flight qcs as timed out and freeze the port. in ata_eh_fastdrain_timerfn()
858 qc->err_mask |= AC_ERR_TIMEOUT; in ata_eh_fastdrain_timerfn()
864 ap->fastdrain_cnt = cnt; in ata_eh_fastdrain_timerfn()
865 ap->fastdrain_timer.expires = in ata_eh_fastdrain_timerfn()
867 add_timer(&ap->fastdrain_timer); in ata_eh_fastdrain_timerfn()
871 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_fastdrain_timerfn()
875 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
880 * is non-zero and EH wasn't pending before. Fast drain ensures
891 if (ap->pflags & ATA_PFLAG_EH_PENDING) in ata_eh_set_pending()
894 ap->pflags |= ATA_PFLAG_EH_PENDING; in ata_eh_set_pending()
899 /* do we have in-flight qcs? */ in ata_eh_set_pending()
905 ap->fastdrain_cnt = cnt; in ata_eh_set_pending()
906 ap->fastdrain_timer.expires = in ata_eh_set_pending()
908 add_timer(&ap->fastdrain_timer); in ata_eh_set_pending()
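/*
 * Reduced model of the fast-drain logic above: a periodic check compares
 * the number of in-flight commands with the count seen last time; if no
 * progress was made, the remaining commands are forcibly timed out.
 * demo_* names and the return convention are invented for this sketch.
 */
#include <stdbool.h>

struct demo_fastdrain {
	unsigned int last_cnt;	/* in-flight count at the previous tick */
};

/* Returns true when the caller should time out the remaining commands. */
static bool demo_fastdrain_tick(struct demo_fastdrain *fd,
				unsigned int in_flight)
{
	if (!in_flight)
		return false;		/* fully drained, nothing to do */
	if (in_flight == fd->last_cnt)
		return true;		/* no progress: give up waiting */
	fd->last_cnt = in_flight;	/* progress made, keep waiting */
	return false;
}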
912 * ata_qc_schedule_eh - schedule qc for error handling
923 struct ata_port *ap = qc->ap; in ata_qc_schedule_eh()
925 qc->flags |= ATA_QCFLAG_EH; in ata_qc_schedule_eh()
933 blk_abort_request(scsi_cmd_to_rq(qc->scsicmd)); in ata_qc_schedule_eh()
937 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
945 if (ap->pflags & ATA_PFLAG_INITIALIZING) in ata_std_sched_eh()
949 scsi_schedule_eh(ap->scsi_host); in ata_std_sched_eh()
956 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
960 * shost, so host fields can be directly manipulated under ap->lock, in
961 * the libsas case we need to hold a lock at the ha->level to coordinate
969 struct Scsi_Host *host = ap->scsi_host; in ata_std_end_eh()
971 host->host_eh_scheduled = 0; in ata_std_end_eh()
977 * ata_port_schedule_eh - schedule error handling without a qc
981 * all commands are drained.
989 ap->ops->sched_eh(ap); in ata_port_schedule_eh()
998 /* we're gonna abort all commands, no need for fast drain */ in ata_do_link_abort()
1003 if (qc && (!link || qc->dev->link == link)) { in ata_do_link_abort()
1004 qc->flags |= ATA_QCFLAG_EH; in ata_do_link_abort()
1017 * ata_link_abort - abort all qc's on the link
1020 * Abort all active qc's active on @link and schedule EH.
1030 return ata_do_link_abort(link->ap, link); in ata_link_abort()
1035 * ata_port_abort - abort all qc's on the port
1038 * Abort all active qc's of @ap and schedule EH.
1053 * __ata_port_freeze - freeze port
1061 * ap->ops->freeze() callback can be used for freezing the port
1062 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1063 * port cannot be frozen hardware-wise, the interrupt handler
1072 if (ap->ops->freeze) in __ata_port_freeze()
1073 ap->ops->freeze(ap); in __ata_port_freeze()
1075 ap->pflags |= ATA_PFLAG_FROZEN; in __ata_port_freeze()
1081 * ata_port_freeze - abort & freeze port
1103 * ata_eh_freeze_port - EH helper to freeze port
1115 spin_lock_irqsave(ap->lock, flags); in ata_eh_freeze_port()
1117 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_freeze_port()
1122 * ata_eh_thaw_port - EH helper to thaw port
1134 spin_lock_irqsave(ap->lock, flags); in ata_eh_thaw_port()
1136 ap->pflags &= ~ATA_PFLAG_FROZEN; in ata_eh_thaw_port()
1138 if (ap->ops->thaw) in ata_eh_thaw_port()
1139 ap->ops->thaw(ap); in ata_eh_thaw_port()
1141 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_thaw_port()
1153 struct ata_port *ap = qc->ap; in __ata_eh_qc_complete()
1154 struct scsi_cmnd *scmd = qc->scsicmd; in __ata_eh_qc_complete()
1157 spin_lock_irqsave(ap->lock, flags); in __ata_eh_qc_complete()
1158 qc->scsidone = ata_eh_scsidone; in __ata_eh_qc_complete()
1160 WARN_ON(ata_tag_valid(qc->tag)); in __ata_eh_qc_complete()
1161 spin_unlock_irqrestore(ap->lock, flags); in __ata_eh_qc_complete()
1163 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in __ata_eh_qc_complete()
1167 * ata_eh_qc_complete - Complete an active ATA command from EH
1175 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_complete()
1176 scmd->retries = scmd->allowed; in ata_eh_qc_complete()
1181 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1182 * @qc: Command to retry
1187 * SCSI midlayer limits the number of retries to scmd->allowed.
1188 * scmd->allowed is incremented for commands which get retried
1189 * due to unrelated failures (qc->err_mask is zero).
1193 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_retry()
1194 if (!qc->err_mask) in ata_eh_qc_retry()
1195 scmd->allowed++; in ata_eh_qc_retry()
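/*
 * Toy model of the retry accounting used by the two helpers above:
 * completing a command from EH burns its remaining retries, while
 * retrying a command that failed through no fault of its own
 * (err_mask == 0) grants it one extra attempt. Names are simplified.
 */
struct demo_scmd {
	int retries;	/* attempts made so far */
	int allowed;	/* attempts permitted in total */
};

static void demo_eh_qc_complete(struct demo_scmd *scmd)
{
	scmd->retries = scmd->allowed;	/* midlayer will not retry it */
}

static void demo_eh_qc_retry(struct demo_scmd *scmd, unsigned int err_mask)
{
	if (!err_mask)
		scmd->allowed++;	/* unrelated failure: free retry */
	/* the midlayer requeues the command while retries < allowed */
}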
1200 * ata_dev_disable - disable ATA device
1216 dev->class++; in ata_dev_disable()
1221 ata_ering_clear(&dev->ering); in ata_dev_disable()
1226 * ata_eh_detach_dev - detach ATA device
1236 struct ata_link *link = dev->link; in ata_eh_detach_dev()
1237 struct ata_port *ap = link->ap; in ata_eh_detach_dev()
1238 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_detach_dev()
1250 spin_lock_irqsave(ap->lock, flags); in ata_eh_detach_dev()
1252 dev->flags &= ~ATA_DFLAG_DETACH; in ata_eh_detach_dev()
1255 dev->flags |= ATA_DFLAG_DETACHED; in ata_eh_detach_dev()
1256 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_detach_dev()
1259 /* clear per-dev EH info */ in ata_eh_detach_dev()
1260 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1261 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1262 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_detach_dev()
1263 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_detach_dev()
1265 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_detach_dev()
1269 * ata_eh_about_to_do - about to perform eh_action
1271 * @dev: target ATA dev for per-dev action (can be NULL)
1275 * in @link->eh_info such that eh actions are not unnecessarily
1284 struct ata_port *ap = link->ap; in ata_eh_about_to_do()
1285 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_about_to_do()
1286 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_about_to_do()
1289 trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action); in ata_eh_about_to_do()
1291 spin_lock_irqsave(ap->lock, flags); in ata_eh_about_to_do()
1298 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) in ata_eh_about_to_do()
1299 ap->pflags |= ATA_PFLAG_RECOVERED; in ata_eh_about_to_do()
1301 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_about_to_do()
1305 * ata_eh_done - EH action complete
1307 * @dev: target ATA dev for per-dev action (can be NULL)
1311 * in @link->eh_context.
1319 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_done()
1321 trace_ata_eh_done(link, dev ? dev->devno : 0, action); in ata_eh_done()
1323 ata_eh_clear_action(link, dev, &ehc->i, action); in ata_eh_done()
1327 * ata_err_string - convert err_mask to descriptive string
1330 * Convert @err_mask to descriptive string. Errors are
1366 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1397 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1411 struct scsi_cmnd *cmd = qc->scsicmd; in ata_eh_request_sense()
1412 struct ata_device *dev = qc->dev; in ata_eh_request_sense()
1416 if (ata_port_is_frozen(qc->ap)) { in ata_eh_request_sense()
1421 if (!ata_id_sense_reporting_enabled(dev->id)) { in ata_eh_request_sense()
1422 ata_dev_warn(qc->dev, "sense data reporting disabled\n"); in ata_eh_request_sense()
1436 /* Set sense without also setting scsicmd->result */ in ata_eh_request_sense()
1437 scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE, in ata_eh_request_sense()
1438 cmd->sense_buffer, tf.lbah, in ata_eh_request_sense()
1440 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_request_sense()
1452 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1471 struct ata_port *ap = dev->link->ap; in atapi_eh_request_sense()
1477 * for the case where they are -not- overwritten in atapi_eh_request_sense()
1489 if (ap->flags & ATA_FLAG_PIO_DMA) { in atapi_eh_request_sense()
1503 * ata_eh_analyze_serror - analyze SError for a failed port
1514 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_analyze_serror()
1515 u32 serror = ehc->i.serror; in ata_eh_analyze_serror()
1537 if (link->lpm_policy > ATA_LPM_MAX_POWER) in ata_eh_analyze_serror()
1539 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) in ata_eh_analyze_serror()
1545 ata_ehi_hotplugged(&ehc->i); in ata_eh_analyze_serror()
1547 ehc->i.err_mask |= err_mask; in ata_eh_analyze_serror()
1548 ehc->i.action |= action; in ata_eh_analyze_serror()
1552 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1567 const struct ata_taskfile *tf = &qc->result_tf; in ata_eh_analyze_tf()
1569 u8 stat = tf->status, err = tf->error; in ata_eh_analyze_tf()
1572 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1577 qc->err_mask |= AC_ERR_DEV; in ata_eh_analyze_tf()
1588 switch (qc->dev->class) { in ata_eh_analyze_tf()
1593 * - It was a non-NCQ command that failed, or in ata_eh_analyze_tf()
1594 * - It was an NCQ command that failed, but the sense data in ata_eh_analyze_tf()
1598 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) && in ata_eh_analyze_tf()
1600 set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); in ata_eh_analyze_tf()
1602 qc->err_mask |= AC_ERR_ATA_BUS; in ata_eh_analyze_tf()
1604 qc->err_mask |= AC_ERR_MEDIA; in ata_eh_analyze_tf()
1606 qc->err_mask |= AC_ERR_INVALID; in ata_eh_analyze_tf()
1610 if (!ata_port_is_frozen(qc->ap)) { in ata_eh_analyze_tf()
1611 tmp = atapi_eh_request_sense(qc->dev, in ata_eh_analyze_tf()
1612 qc->scsicmd->sense_buffer, in ata_eh_analyze_tf()
1613 qc->result_tf.error >> 4); in ata_eh_analyze_tf()
1615 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_analyze_tf()
1617 qc->err_mask |= tmp; in ata_eh_analyze_tf()
1621 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { in ata_eh_analyze_tf()
1622 enum scsi_disposition ret = scsi_check_sense(qc->scsicmd); in ata_eh_analyze_tf()
1633 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_analyze_tf()
1634 qc->err_mask |= AC_ERR_OTHER; in ata_eh_analyze_tf()
1636 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1639 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) in ata_eh_analyze_tf()
1684 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) in speed_down_verdict_cb()
1685 return -1; in speed_down_verdict_cb()
1687 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, in speed_down_verdict_cb()
1688 &arg->xfer_ok); in speed_down_verdict_cb()
1689 arg->nr_errors[cat]++; in speed_down_verdict_cb()
1695 * ata_eh_speed_down_verdict - Determine speed down verdict
1722 * taken per error. An action triggered by non-DUBIOUS errors
1723 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1728 * DUBIOUS errors.
1730 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM error
1733 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV error
1736 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1739 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1742 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1743 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1760 arg.since = j64 - min(j64, j5mins); in ata_eh_speed_down_verdict()
1761 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
1779 arg.since = j64 - min(j64, j10mins); in ata_eh_speed_down_verdict()
1780 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
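/*
 * Rough illustration of rule 5 above: error counts per category over the
 * last 10 minutes checked against fixed thresholds (here reading "more
 * than 3 ATA_BUS or TOUT_HSM errors" as a combined count). The category
 * names and the helper are placeholders, not the kernel's symbols.
 */
enum demo_ecat { DEMO_ECAT_ATA_BUS, DEMO_ECAT_TOUT_HSM,
		 DEMO_ECAT_UNK_DEV, DEMO_ECAT_NR };

static int demo_wants_speed_down(const int nr_errors[DEMO_ECAT_NR])
{
	return nr_errors[DEMO_ECAT_ATA_BUS] +
	       nr_errors[DEMO_ECAT_TOUT_HSM] > 3 ||
	       nr_errors[DEMO_ECAT_UNK_DEV] > 6;
}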
1795 * ata_eh_speed_down - record error and speed down if necessary
1819 /* don't bother if Cat-0 error */ in ata_eh_speed_down()
1824 ata_ering_record(&dev->ering, eflags, err_mask); in ata_eh_speed_down()
1829 dev->flags |= ATA_DFLAG_NCQ_OFF; in ata_eh_speed_down()
1830 ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); in ata_eh_speed_down()
1843 if (dev->spdn_cnt < 2) { in ata_eh_speed_down()
1850 if (dev->xfer_shift != ATA_SHIFT_PIO) in ata_eh_speed_down()
1851 sel = dma_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1853 sel = pio_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1855 dev->spdn_cnt++; in ata_eh_speed_down()
1867 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && in ata_eh_speed_down()
1868 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && in ata_eh_speed_down()
1869 (dev->xfer_shift != ATA_SHIFT_PIO)) { in ata_eh_speed_down()
1871 dev->spdn_cnt = 0; in ata_eh_speed_down()
1881 ata_ering_clear(&dev->ering); in ata_eh_speed_down()
1886 * ata_eh_worth_retry - analyze error and decide whether to retry
1887 * @qc: qc to possibly retry
1889 * Look at the cause of the error and decide if a retry
1890 * might be useful or not. We don't want to retry media errors
1891 * because the drive itself has probably already taken 10-30 seconds
1896 if (qc->err_mask & AC_ERR_MEDIA) in ata_eh_worth_retry()
1897 return 0; /* don't retry media errors */ in ata_eh_worth_retry()
1898 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_worth_retry()
1899 return 1; /* otherwise retry anything from fs stack */ in ata_eh_worth_retry()
1900 if (qc->err_mask & AC_ERR_INVALID) in ata_eh_worth_retry()
1901 return 0; /* don't retry these */ in ata_eh_worth_retry()
1902 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ in ata_eh_worth_retry()
1906 * ata_eh_quiet - check if we need to be quiet about a command error
1914 if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET) in ata_eh_quiet()
1915 qc->flags |= ATA_QCFLAG_QUIET; in ata_eh_quiet()
1916 return qc->flags & ATA_QCFLAG_QUIET; in ata_eh_quiet()
1921 struct ata_port *ap = link->ap; in ata_eh_read_sense_success_non_ncq()
1924 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_read_sense_success_non_ncq()
1926 return -EIO; in ata_eh_read_sense_success_non_ncq()
1928 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_read_sense_success_non_ncq()
1929 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_read_sense_success_non_ncq()
1930 qc->err_mask) in ata_eh_read_sense_success_non_ncq()
1931 return -EIO; in ata_eh_read_sense_success_non_ncq()
1934 return -EIO; in ata_eh_read_sense_success_non_ncq()
1941 scsi_check_sense(qc->scsicmd); in ata_eh_read_sense_success_non_ncq()
1948 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_get_success_sense()
1949 struct ata_device *dev = link->device; in ata_eh_get_success_sense()
1950 struct ata_port *ap = link->ap; in ata_eh_get_success_sense()
1954 if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE)) in ata_eh_get_success_sense()
1967 * data. Otherwise, we are dealing with a non-NCQ command and use in ata_eh_get_success_sense()
1970 if (link->sactive) in ata_eh_get_success_sense()
1991 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_get_success_sense()
1992 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_get_success_sense()
1993 qc->err_mask || in ata_eh_get_success_sense()
1994 ata_dev_phys_link(qc->dev) != link) in ata_eh_get_success_sense()
1998 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_get_success_sense()
2002 if (!(qc->result_tf.status & ATA_SENSE)) in ata_eh_get_success_sense()
2006 ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0); in ata_eh_get_success_sense()
2007 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_get_success_sense()
2013 * ata_eh_link_autopsy - analyze error and determine recovery action
2025 struct ata_port *ap = link->ap; in ata_eh_link_autopsy()
2026 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_autopsy()
2034 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) in ata_eh_link_autopsy()
2040 ehc->i.serror |= serror; in ata_eh_link_autopsy()
2042 } else if (rc != -EOPNOTSUPP) { in ata_eh_link_autopsy()
2044 ehc->i.probe_mask |= ATA_ALL_DEVICES; in ata_eh_link_autopsy()
2045 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2046 ehc->i.err_mask |= AC_ERR_OTHER; in ata_eh_link_autopsy()
2061 if (ehc->i.err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2062 ehc->i.err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2064 all_err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2067 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_autopsy()
2068 qc->flags & ATA_QCFLAG_RETRY || in ata_eh_link_autopsy()
2069 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD || in ata_eh_link_autopsy()
2070 ata_dev_phys_link(qc->dev) != link) in ata_eh_link_autopsy()
2074 qc->err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2077 ehc->i.action |= ata_eh_analyze_tf(qc); in ata_eh_link_autopsy()
2079 /* DEV errors are probably spurious in case of ATA_BUS error */ in ata_eh_link_autopsy()
2080 if (qc->err_mask & AC_ERR_ATA_BUS) in ata_eh_link_autopsy()
2081 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | in ata_eh_link_autopsy()
2085 if (qc->err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2086 qc->err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2095 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_link_autopsy()
2096 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); in ata_eh_link_autopsy()
2098 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_link_autopsy()
2101 ehc->i.dev = qc->dev; in ata_eh_link_autopsy()
2102 all_err_mask |= qc->err_mask; in ata_eh_link_autopsy()
2103 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_link_autopsy()
2107 /* Count quiet errors */ in ata_eh_link_autopsy()
2113 /* If all failed commands requested silence, then be quiet */ in ata_eh_link_autopsy()
2115 ehc->i.flags |= ATA_EHI_QUIET; in ata_eh_link_autopsy()
2120 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2123 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_link_autopsy()
2126 * perform per-dev EH action only on the offending device. in ata_eh_link_autopsy()
2128 if (ehc->i.dev) { in ata_eh_link_autopsy()
2129 ehc->i.dev_action[ehc->i.dev->devno] |= in ata_eh_link_autopsy()
2130 ehc->i.action & ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2131 ehc->i.action &= ~ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2136 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; in ata_eh_link_autopsy()
2139 dev = ehc->i.dev; in ata_eh_link_autopsy()
2141 ata_dev_enabled(link->device)))) in ata_eh_link_autopsy()
2142 dev = link->device; in ata_eh_link_autopsy()
2145 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) in ata_eh_link_autopsy()
2147 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); in ata_eh_link_autopsy()
2148 trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); in ata_eh_link_autopsy()
2153 * ata_eh_autopsy - analyze error and determine recovery action
2156 * Analyze all links of @ap and determine why they failed and
2173 if (ap->slave_link) { in ata_eh_autopsy()
2174 struct ata_eh_context *mehc = &ap->link.eh_context; in ata_eh_autopsy()
2175 struct ata_eh_context *sehc = &ap->slave_link->eh_context; in ata_eh_autopsy()
2178 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; in ata_eh_autopsy()
2181 ata_eh_link_autopsy(ap->slave_link); in ata_eh_autopsy()
2184 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2185 mehc->i.action |= sehc->i.action; in ata_eh_autopsy()
2186 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; in ata_eh_autopsy()
2187 mehc->i.flags |= sehc->i.flags; in ata_eh_autopsy()
2188 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2195 ata_eh_link_autopsy(&ap->link); in ata_eh_autopsy()
2199 * ata_get_cmd_name - get name for ATA command
2242 { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, in ata_get_cmd_name()
2271 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, in ata_get_cmd_name()
2321 * ata_eh_link_report - report error handling to user
2331 struct ata_port *ap = link->ap; in ata_eh_link_report()
2332 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_report()
2338 if (ehc->i.flags & ATA_EHI_QUIET) in ata_eh_link_report()
2342 if (ehc->i.desc[0] != '\0') in ata_eh_link_report()
2343 desc = ehc->i.desc; in ata_eh_link_report()
2346 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2347 ata_dev_phys_link(qc->dev) != link || in ata_eh_link_report()
2348 ((qc->flags & ATA_QCFLAG_QUIET) && in ata_eh_link_report()
2349 qc->err_mask == AC_ERR_DEV)) in ata_eh_link_report()
2351 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) in ata_eh_link_report()
2357 if (!nr_failed && !ehc->i.err_mask) in ata_eh_link_report()
2364 if (ap->eh_tries < ATA_EH_MAX_TRIES) in ata_eh_link_report()
2366 ap->eh_tries); in ata_eh_link_report()
2368 if (ehc->i.dev) { in ata_eh_link_report()
2369 ata_dev_err(ehc->i.dev, "exception Emask 0x%x " in ata_eh_link_report()
2371 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2372 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2374 ata_dev_err(ehc->i.dev, "%s\n", desc); in ata_eh_link_report()
2378 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2379 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2385 if (ehc->i.serror) in ata_eh_link_report()
2388 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", in ata_eh_link_report()
2389 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", in ata_eh_link_report()
2390 ehc->i.serror & SERR_DATA ? "UnrecovData " : "", in ata_eh_link_report()
2391 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", in ata_eh_link_report()
2392 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", in ata_eh_link_report()
2393 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", in ata_eh_link_report()
2394 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", in ata_eh_link_report()
2395 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", in ata_eh_link_report()
2396 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", in ata_eh_link_report()
2397 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", in ata_eh_link_report()
2398 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", in ata_eh_link_report()
2399 ehc->i.serror & SERR_CRC ? "BadCRC " : "", in ata_eh_link_report()
2400 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", in ata_eh_link_report()
2401 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", in ata_eh_link_report()
2402 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", in ata_eh_link_report()
2403 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", in ata_eh_link_report()
2404 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); in ata_eh_link_report()
2408 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; in ata_eh_link_report()
2412 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2413 ata_dev_phys_link(qc->dev) != link || !qc->err_mask) in ata_eh_link_report()
2416 if (qc->dma_dir != DMA_NONE) { in ata_eh_link_report()
2424 switch (qc->tf.protocol) { in ata_eh_link_report()
2454 prot_str, qc->nbytes, dma_str[qc->dma_dir]); in ata_eh_link_report()
2457 if (ata_is_atapi(qc->tf.protocol)) { in ata_eh_link_report()
2458 const u8 *cdb = qc->cdb; in ata_eh_link_report()
2459 size_t cdb_len = qc->dev->cdb_len; in ata_eh_link_report()
2461 if (qc->scsicmd) { in ata_eh_link_report()
2462 cdb = qc->scsicmd->cmnd; in ata_eh_link_report()
2463 cdb_len = qc->scsicmd->cmd_len; in ata_eh_link_report()
2468 ata_dev_err(qc->dev, "failed command: %s\n", in ata_eh_link_report()
2469 ata_get_cmd_name(cmd->command)); in ata_eh_link_report()
2471 ata_dev_err(qc->dev, in ata_eh_link_report()
2476 cmd->command, cmd->feature, cmd->nsect, in ata_eh_link_report()
2477 cmd->lbal, cmd->lbam, cmd->lbah, in ata_eh_link_report()
2478 cmd->hob_feature, cmd->hob_nsect, in ata_eh_link_report()
2479 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, in ata_eh_link_report()
2480 cmd->device, qc->tag, data_buf, cdb_buf, in ata_eh_link_report()
2481 res->status, res->error, res->nsect, in ata_eh_link_report()
2482 res->lbal, res->lbam, res->lbah, in ata_eh_link_report()
2483 res->hob_feature, res->hob_nsect, in ata_eh_link_report()
2484 res->hob_lbal, res->hob_lbam, res->hob_lbah, in ata_eh_link_report()
2485 res->device, qc->err_mask, ata_err_string(qc->err_mask), in ata_eh_link_report()
2486 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); in ata_eh_link_report()
2489 if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | in ata_eh_link_report()
2491 if (res->status & ATA_BUSY) in ata_eh_link_report()
2492 ata_dev_err(qc->dev, "status: { Busy }\n"); in ata_eh_link_report()
2494 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", in ata_eh_link_report()
2495 res->status & ATA_DRDY ? "DRDY " : "", in ata_eh_link_report()
2496 res->status & ATA_DF ? "DF " : "", in ata_eh_link_report()
2497 res->status & ATA_DRQ ? "DRQ " : "", in ata_eh_link_report()
2498 res->status & ATA_SENSE ? "SENSE " : "", in ata_eh_link_report()
2499 res->status & ATA_ERR ? "ERR " : ""); in ata_eh_link_report()
2502 if (cmd->command != ATA_CMD_PACKET && in ata_eh_link_report()
2503 (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | in ata_eh_link_report()
2505 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", in ata_eh_link_report()
2506 res->error & ATA_ICRC ? "ICRC " : "", in ata_eh_link_report()
2507 res->error & ATA_UNC ? "UNC " : "", in ata_eh_link_report()
2508 res->error & ATA_AMNF ? "AMNF " : "", in ata_eh_link_report()
2509 res->error & ATA_IDNF ? "IDNF " : "", in ata_eh_link_report()
2510 res->error & ATA_ABORTED ? "ABRT " : ""); in ata_eh_link_report()
2516 * ata_eh_report - report error handling to user
2539 ata_for_each_dev(dev, link, ALL) in ata_do_reset()
2540 classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_do_reset()
2547 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) in ata_eh_followup_srst_needed()
2549 if (rc == -EAGAIN) in ata_eh_followup_srst_needed()
2551 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) in ata_eh_followup_srst_needed()
2560 struct ata_port *ap = link->ap; in ata_eh_reset()
2561 struct ata_link *slave = ap->slave_link; in ata_eh_reset()
2562 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_reset()
2563 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; in ata_eh_reset()
2564 unsigned int *classes = ehc->classes; in ata_eh_reset()
2565 unsigned int lflags = link->flags; in ata_eh_reset()
2566 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); in ata_eh_reset()
2581 if (link->flags & ATA_LFLAG_RST_ONCE) in ata_eh_reset()
2583 if (link->flags & ATA_LFLAG_NO_HRST) in ata_eh_reset()
2585 if (link->flags & ATA_LFLAG_NO_SRST) in ata_eh_reset()
2589 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_reset()
2591 WARN_ON(time_after(ehc->last_reset, now)); in ata_eh_reset()
2592 deadline = ata_deadline(ehc->last_reset, in ata_eh_reset()
2595 schedule_timeout_uninterruptible(deadline - now); in ata_eh_reset()
2598 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
2599 ap->pflags |= ATA_PFLAG_RESETTING; in ata_eh_reset()
2600 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
2604 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2612 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
2613 dev->dma_mode = 0xff; in ata_eh_reset()
2620 if (ap->ops->set_piomode) in ata_eh_reset()
2621 ap->ops->set_piomode(ap, dev); in ata_eh_reset()
2626 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2629 ehc->i.action |= ATA_EH_HARDRESET; in ata_eh_reset()
2632 ehc->i.action |= ATA_EH_SOFTRESET; in ata_eh_reset()
2640 sehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2641 sehc->i.action |= ehc->i.action; in ata_eh_reset()
2648 * -ENOENT or clear ATA_EH_RESET. in ata_eh_reset()
2650 if (slave && (rc == 0 || rc == -ENOENT)) { in ata_eh_reset()
2654 if (tmp != -ENOENT) in ata_eh_reset()
2657 ehc->i.action |= sehc->i.action; in ata_eh_reset()
2661 if (rc == -ENOENT) { in ata_eh_reset()
2662 ata_link_dbg(link, "port disabled--ignoring\n"); in ata_eh_reset()
2663 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2665 ata_for_each_dev(dev, link, ALL) in ata_eh_reset()
2666 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2679 if (reset && !(ehc->i.action & ATA_EH_RESET)) { in ata_eh_reset()
2680 ata_for_each_dev(dev, link, ALL) in ata_eh_reset()
2681 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2689 retry: in ata_eh_reset()
2704 ehc->last_reset = jiffies; in ata_eh_reset()
2706 ehc->i.flags |= ATA_EHI_DID_HARDRESET; in ata_eh_reset()
2709 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; in ata_eh_reset()
2718 if (rc && rc != -EAGAIN) { in ata_eh_reset()
2737 case -EAGAIN: in ata_eh_reset()
2738 rc = -EAGAIN; in ata_eh_reset()
2749 /* perform follow-up SRST if necessary */ in ata_eh_reset()
2756 "follow-up softreset required but no softreset available\n"); in ata_eh_reset()
2758 rc = -EINVAL; in ata_eh_reset()
2780 * Post-reset processing in ata_eh_reset()
2782 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2787 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
2788 dev->flags &= ~ATA_DFLAG_SLEEPING; in ata_eh_reset()
2795 classes[dev->devno] = ATA_DEV_ATA; in ata_eh_reset()
2797 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; in ata_eh_reset()
2802 link->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
2804 slave->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
2827 spin_lock_irqsave(link->ap->lock, flags); in ata_eh_reset()
2828 link->eh_info.serror = 0; in ata_eh_reset()
2830 slave->eh_info.serror = 0; in ata_eh_reset()
2831 spin_unlock_irqrestore(link->ap->lock, flags); in ata_eh_reset()
2842 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2844 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
2846 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2850 if (ata_class_enabled(classes[dev->devno])) in ata_eh_reset()
2853 classes[dev->devno]); in ata_eh_reset()
2854 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2855 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
2858 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2868 rc = -EAGAIN; in ata_eh_reset()
2880 ehc->last_reset = jiffies; /* update to completion time */ in ata_eh_reset()
2881 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_reset()
2882 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ in ata_eh_reset()
2887 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
2889 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
2891 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
2892 ap->pflags &= ~ATA_PFLAG_RESETTING; in ata_eh_reset()
2893 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
2898 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ in ata_eh_reset()
2901 rc = -ERESTART; in ata_eh_reset()
2917 unsigned long delta = deadline - now; in ata_eh_reset()
2931 * They need to be reset - as well as the PMP - before retrying. in ata_eh_reset()
2933 if (rc == -ERESTART) { in ata_eh_reset()
2939 if (try == max_tries - 1) { in ata_eh_reset()
2943 } else if (rc == -EPIPE) in ata_eh_reset()
2948 goto retry; in ata_eh_reset()
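/*
 * Very reduced model of the reset retry loop above: a bounded number of
 * attempts, a pause between tries, and a distinct "restart the whole
 * recovery" outcome that is not retried here. Names, return codes and
 * the fixed sleep are invented for this sketch.
 */
#include <unistd.h>

enum demo_reset_rc { DEMO_RESET_OK = 0, DEMO_RESET_AGAIN = -1,
		     DEMO_RESET_RESTART = -2 };

static int demo_reset_with_retries(int (*do_reset)(void), int max_tries,
				   unsigned int wait_secs)
{
	int rc = DEMO_RESET_AGAIN;
	int try;

	for (try = 0; try < max_tries; try++) {
		rc = do_reset();
		if (rc == DEMO_RESET_OK || rc == DEMO_RESET_RESTART)
			break;		/* done, or caller restarts EH */
		sleep(wait_secs);	/* back off before the next try */
	}
	return rc;
}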
2969 * Additionally, all write accesses to &ap->park_req_pending in ata_eh_pull_park_action()
2974 * *all* devices on port ap have been pulled into the in ata_eh_pull_park_action()
2976 * park_req_pending.done is non-zero by the time we reach in ata_eh_pull_park_action()
2983 spin_lock_irqsave(ap->lock, flags); in ata_eh_pull_park_action()
2984 reinit_completion(&ap->park_req_pending); in ata_eh_pull_park_action()
2986 ata_for_each_dev(dev, link, ALL) { in ata_eh_pull_park_action()
2987 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_pull_park_action()
2989 link->eh_context.i.dev_action[dev->devno] |= in ata_eh_pull_park_action()
2990 ehi->dev_action[dev->devno] & ATA_EH_PARK; in ata_eh_pull_park_action()
2994 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_pull_park_action()
2999 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_park_issue_cmd()
3005 ehc->unloaded_mask |= 1 << dev->devno; in ata_eh_park_issue_cmd()
3012 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3021 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3028 struct ata_port *ap = link->ap; in ata_eh_revalidate_and_attach()
3029 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_revalidate_and_attach()
3036 * be done backwards such that PDIAG- is released by the slave in ata_eh_revalidate_and_attach()
3043 if (ehc->i.flags & ATA_EHI_DID_RESET) in ata_eh_revalidate_and_attach()
3056 WARN_ON(dev->class == ATA_DEV_PMP); in ata_eh_revalidate_and_attach()
3066 * to ap->target_lpm_policy after revalidation is done. in ata_eh_revalidate_and_attach()
3068 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_revalidate_and_attach()
3076 rc = -EIO; in ata_eh_revalidate_and_attach()
3081 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], in ata_eh_revalidate_and_attach()
3091 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3094 schedule_delayed_work(&ap->scsi_rescan_task, 0); in ata_eh_revalidate_and_attach()
3095 } else if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_revalidate_and_attach()
3096 ehc->tries[dev->devno] && in ata_eh_revalidate_and_attach()
3097 ata_class_enabled(ehc->classes[dev->devno])) { in ata_eh_revalidate_and_attach()
3098 /* Temporarily set dev->class, it will be in ata_eh_revalidate_and_attach()
3099 * permanently set once all configurations are in ata_eh_revalidate_and_attach()
3104 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3106 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3109 rc = ata_dev_read_id(dev, &dev->class, in ata_eh_revalidate_and_attach()
3110 readid_flags, dev->id); in ata_eh_revalidate_and_attach()
3113 ehc->classes[dev->devno] = dev->class; in ata_eh_revalidate_and_attach()
3114 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3119 ata_ering_clear(&dev->ering); in ata_eh_revalidate_and_attach()
3120 new_mask |= 1 << dev->devno; in ata_eh_revalidate_and_attach()
3122 case -ENOENT: in ata_eh_revalidate_and_attach()
3123 /* IDENTIFY was issued to non-existent in ata_eh_revalidate_and_attach()
3135 /* PDIAG- should have been released, ask cable type if post-reset */ in ata_eh_revalidate_and_attach()
3136 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { in ata_eh_revalidate_and_attach()
3137 if (ap->ops->cable_detect) in ata_eh_revalidate_and_attach()
3138 ap->cbl = ap->ops->cable_detect(ap); in ata_eh_revalidate_and_attach()
3145 ata_for_each_dev(dev, link, ALL) { in ata_eh_revalidate_and_attach()
3146 if (!(new_mask & (1 << dev->devno))) in ata_eh_revalidate_and_attach()
3149 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3151 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3154 ehc->i.flags |= ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3156 ehc->i.flags &= ~ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3158 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3162 spin_lock_irqsave(ap->lock, flags); in ata_eh_revalidate_and_attach()
3163 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_revalidate_and_attach()
3164 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_revalidate_and_attach()
3167 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3178 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3194 struct ata_port *ap = link->ap; in ata_set_mode()
3200 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { in ata_set_mode()
3203 ent = ata_ering_top(&dev->ering); in ata_set_mode()
3205 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; in ata_set_mode()
3210 if (ap->ops->set_mode) in ata_set_mode()
3211 rc = ap->ops->set_mode(link, r_failed_dev); in ata_set_mode()
3217 struct ata_eh_context *ehc = &link->eh_context; in ata_set_mode()
3218 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; in ata_set_mode()
3219 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); in ata_set_mode()
3221 if (dev->xfer_mode != saved_xfer_mode || in ata_set_mode()
3223 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; in ata_set_mode()
3230 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3241 * 0 on success, -errno on failure.
3248 u8 *sense_buffer = dev->link->ap->sector_buf; in atapi_eh_clear_ua()
3257 return -EIO; in atapi_eh_clear_ua()
3267 return -EIO; in atapi_eh_clear_ua()
3278 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3279 * @dev: ATA device which may need FLUSH retry
3288 * This function determines whether FLUSH failure retry is
3292 * 0 if EH can continue, -errno if EH needs to be repeated.
3296 struct ata_link *link = dev->link; in ata_eh_maybe_retry_flush()
3297 struct ata_port *ap = link->ap; in ata_eh_maybe_retry_flush()
3304 if (!ata_tag_valid(link->active_tag)) in ata_eh_maybe_retry_flush()
3307 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_maybe_retry_flush()
3308 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && in ata_eh_maybe_retry_flush()
3309 qc->tf.command != ATA_CMD_FLUSH)) in ata_eh_maybe_retry_flush()
3313 if (qc->err_mask & AC_ERR_DEV) in ata_eh_maybe_retry_flush()
3319 tf.command = qc->tf.command; in ata_eh_maybe_retry_flush()
3324 tf.command, qc->err_mask); in ata_eh_maybe_retry_flush()
3331 * Making sure retry is allowed at least once and in ata_eh_maybe_retry_flush()
3332 * retrying it should do the trick - whatever was in in ata_eh_maybe_retry_flush()
3336 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); in ata_eh_maybe_retry_flush()
3340 rc = -EIO; in ata_eh_maybe_retry_flush()
3344 qc->err_mask |= AC_ERR_DEV; in ata_eh_maybe_retry_flush()
3345 qc->result_tf = tf; in ata_eh_maybe_retry_flush()
3354 * ata_eh_set_lpm - configure SATA interface power management
3368 * 0 on success, -errno on failure.
3373 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; in ata_eh_set_lpm()
3374 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_set_lpm()
3376 enum ata_lpm_policy old_policy = link->lpm_policy; in ata_eh_set_lpm()
3377 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; in ata_eh_set_lpm()
3384 (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) in ata_eh_set_lpm()
3394 bool hipm = ata_id_has_hipm(dev->id); in ata_eh_set_lpm()
3395 bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; in ata_eh_set_lpm()
3416 rc = -EIO; in ata_eh_set_lpm()
3423 rc = ap->ops->set_lpm(link, policy, hints); in ata_eh_set_lpm()
3424 if (!rc && ap->slave_link) in ata_eh_set_lpm()
3425 rc = ap->ops->set_lpm(ap->slave_link, policy, hints); in ata_eh_set_lpm()
3434 if (rc == -EOPNOTSUPP) { in ata_eh_set_lpm()
3435 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_set_lpm()
3446 link->lpm_policy = policy; in ata_eh_set_lpm()
3447 if (ap && ap->slave_link) in ata_eh_set_lpm()
3448 ap->slave_link->lpm_policy = policy; in ata_eh_set_lpm()
3453 ata_id_has_dipm(dev->id)) { in ata_eh_set_lpm()
3460 rc = -EIO; in ata_eh_set_lpm()
3466 link->last_lpm_change = jiffies; in ata_eh_set_lpm()
3467 link->flags |= ATA_LFLAG_CHANGED; in ata_eh_set_lpm()
3473 link->lpm_policy = old_policy; in ata_eh_set_lpm()
3474 if (ap && ap->slave_link) in ata_eh_set_lpm()
3475 ap->slave_link->lpm_policy = old_policy; in ata_eh_set_lpm()
3478 if (!dev || ehc->tries[dev->devno] <= 2) { in ata_eh_set_lpm()
3480 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_set_lpm()
3502 ata_for_each_dev(dev, link, ALL) in ata_link_nr_vacant()
3503 if (dev->class == ATA_DEV_UNKNOWN) in ata_link_nr_vacant()
3510 struct ata_port *ap = link->ap; in ata_eh_skip_recovery()
3511 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_skip_recovery()
3515 if (link->flags & ATA_LFLAG_DISABLED) in ata_eh_skip_recovery()
3519 if (ehc->i.flags & ATA_EHI_NO_RECOVERY) in ata_eh_skip_recovery()
3527 if ((ehc->i.action & ATA_EH_RESET) && in ata_eh_skip_recovery()
3528 !(ehc->i.flags & ATA_EHI_DID_RESET)) in ata_eh_skip_recovery()
3531 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ in ata_eh_skip_recovery()
3532 ata_for_each_dev(dev, link, ALL) { in ata_eh_skip_recovery()
3533 if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_skip_recovery()
3534 ehc->classes[dev->devno] != ATA_DEV_NONE) in ata_eh_skip_recovery()
3547 if ((ent->eflags & ATA_EFLAG_OLD_ER) || in ata_count_probe_trials_cb()
3548 (ent->timestamp < now - min(now, interval))) in ata_count_probe_trials_cb()
3549 return -1; in ata_count_probe_trials_cb()
3557 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_schedule_probe()
3561 if (!(ehc->i.probe_mask & (1 << dev->devno)) || in ata_eh_schedule_probe()
3562 (ehc->did_probe_mask & (1 << dev->devno))) in ata_eh_schedule_probe()
3567 ehc->did_probe_mask |= (1 << dev->devno); in ata_eh_schedule_probe()
3568 ehc->i.action |= ATA_EH_RESET; in ata_eh_schedule_probe()
3569 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_schedule_probe()
3570 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_schedule_probe()
3573 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_schedule_probe()
3575 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, in ata_eh_schedule_probe()
3595 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); in ata_eh_schedule_probe()
3596 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); in ata_eh_schedule_probe()
3606 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_handle_dev_fail()
3608 /* -EAGAIN from EH routine indicates retry without prejudice. in ata_eh_handle_dev_fail()
3611 if (err != -EAGAIN) in ata_eh_handle_dev_fail()
3612 ehc->tries[dev->devno]--; in ata_eh_handle_dev_fail()
3615 case -ENODEV: in ata_eh_handle_dev_fail()
3617 ehc->i.probe_mask |= (1 << dev->devno); in ata_eh_handle_dev_fail()
3619 case -EINVAL: in ata_eh_handle_dev_fail()
3621 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); in ata_eh_handle_dev_fail()
3623 case -EIO: in ata_eh_handle_dev_fail()
3624 if (ehc->tries[dev->devno] == 1) { in ata_eh_handle_dev_fail()
3629 if (dev->pio_mode > XFER_PIO_0) in ata_eh_handle_dev_fail()
3634 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { in ata_eh_handle_dev_fail()
3635 /* disable device if it has used up all its chances */ in ata_eh_handle_dev_fail()
3644 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_handle_dev_fail()
3645 memset(ehc->cmd_timeout_idx[dev->devno], 0, in ata_eh_handle_dev_fail()
3646 sizeof(ehc->cmd_timeout_idx[dev->devno])); in ata_eh_handle_dev_fail()
3651 ehc->i.action |= ATA_EH_RESET; in ata_eh_handle_dev_fail()
3657 * ata_eh_recover - recover host port after error
3668 * link's eh_context. This function executes all the operations
3676 * 0 on success, -errno on failure.
3690 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3692 /* re-enable link? */ in ata_eh_recover()
3693 if (ehc->i.action & ATA_EH_ENABLE_LINK) { in ata_eh_recover()
3695 spin_lock_irqsave(ap->lock, flags); in ata_eh_recover()
3696 link->flags &= ~ATA_LFLAG_DISABLED; in ata_eh_recover()
3697 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_recover()
3701 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3702 if (link->flags & ATA_LFLAG_NO_RETRY) in ata_eh_recover()
3703 ehc->tries[dev->devno] = 1; in ata_eh_recover()
3705 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_recover()
3708 ehc->i.action |= ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3710 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; in ata_eh_recover()
3713 if (dev->flags & ATA_DFLAG_DETACH) in ata_eh_recover()
3722 retry: in ata_eh_recover()
3726 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_eh_recover()
3731 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3735 ehc->i.action = 0; in ata_eh_recover()
3737 ata_for_each_dev(dev, link, ALL) in ata_eh_recover()
3738 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_eh_recover()
3743 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3745 if (!(ehc->i.action & ATA_EH_RESET)) in ata_eh_recover()
3761 * ap->park_req_pending in ata_eh_recover()
3767 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3768 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3771 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3772 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3774 if (!(ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3777 tmp = dev->unpark_deadline; in ata_eh_recover()
3782 if (ehc->unloaded_mask & (1 << dev->devno)) in ata_eh_recover()
3794 deadline = wait_for_completion_timeout(&ap->park_req_pending, in ata_eh_recover()
3795 deadline - now); in ata_eh_recover()
3799 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3800 if (!(link->eh_context.unloaded_mask & in ata_eh_recover()
3801 (1 << dev->devno))) in ata_eh_recover()
3812 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3823 if (link->device->class == ATA_DEV_PMP) { in ata_eh_recover()
3824 ehc->i.action = 0; in ata_eh_recover()
3829 if (ehc->i.flags & ATA_EHI_SETMODE) { in ata_eh_recover()
3833 ehc->i.flags &= ~ATA_EHI_SETMODE; in ata_eh_recover()
3839 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_recover()
3840 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3841 if (dev->class != ATA_DEV_ATAPI) in ata_eh_recover()
3851 /* retry flush if necessary */ in ata_eh_recover()
3852 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3853 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3854 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3863 if (link->lpm_policy != ap->target_lpm_policy) { in ata_eh_recover()
3864 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); in ata_eh_recover()
3870 ehc->i.flags = 0; in ata_eh_recover()
3880 * Can't retry if it's frozen. in ata_eh_recover()
3889 goto retry; in ata_eh_recover()
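/*
 * Heavily condensed outline of one pass of the recovery loop above: the
 * stages (reset, revalidate and attach, set transfer mode, clear ATAPI
 * UNIT ATTENTION, retry FLUSH, configure link power management) run in
 * order and any failure ends the pass so the caller can decide whether
 * to retry. The generic stage-runner below is illustrative only.
 */
typedef int (*demo_stage_fn)(void);

static int demo_recover_one_pass(const demo_stage_fn *stages, int nr_stages)
{
	int i, rc = 0;

	for (i = 0; i < nr_stages && rc == 0; i++)
		rc = stages[i]();	/* nonzero return aborts the pass */
	return rc;			/* caller may retry on failure */
}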
3899 * ata_eh_finish - finish up EH
3902 * Recovery is complete. Clean up EH states and retry or finish
3913 /* retry or finish qcs */ in ata_eh_finish()
3915 if (!(qc->flags & ATA_QCFLAG_EH)) in ata_eh_finish()
3918 if (qc->err_mask) { in ata_eh_finish()
3923 if (qc->flags & ATA_QCFLAG_RETRY) { in ata_eh_finish()
3925 * Since qc->err_mask is set, ata_eh_qc_retry() in ata_eh_finish()
3926 * will not increment scmd->allowed, so upper in ata_eh_finish()
3927 * layer will only retry the command if it has in ata_eh_finish()
3935 if (qc->flags & ATA_QCFLAG_SENSE_VALID || in ata_eh_finish()
3936 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) { in ata_eh_finish()
3940 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); in ata_eh_finish()
3942 * Since qc->err_mask is not set, in ata_eh_finish()
3944 * scmd->allowed, so upper layer is guaranteed in ata_eh_finish()
3945 * to retry the command. in ata_eh_finish()
3953 WARN_ON(ap->nr_active_links); in ata_eh_finish()
3954 ap->nr_active_links = 0; in ata_eh_finish()
3958 * ata_do_eh - do standard error handling
3984 ata_for_each_dev(dev, &ap->link, ALL) in ata_do_eh()
3992 * ata_std_error_handler - standard error handler
4002 struct ata_port_operations *ops = ap->ops; in ata_std_error_handler()
4003 ata_reset_fn_t hardreset = ops->hardreset; in ata_std_error_handler()
4005 /* ignore built-in hardreset if SCR access is not available */ in ata_std_error_handler()
4006 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) in ata_std_error_handler()
4009 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); in ata_std_error_handler()
4015 * ata_eh_handle_port_suspend - perform port suspend operation
4031 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4032 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_suspend()
4033 ap->pm_mesg.event & PM_EVENT_RESUME) { in ata_eh_handle_port_suspend()
4034 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4037 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4039 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_suspend()
4041 /* Set all devices attached to the port in standby mode */ in ata_eh_handle_port_suspend()
4052 if (PMSG_IS_AUTO(ap->pm_mesg)) { in ata_eh_handle_port_suspend()
4053 ata_for_each_dev(dev, &ap->link, ENABLED) { in ata_eh_handle_port_suspend()
4062 if (ap->ops->port_suspend) in ata_eh_handle_port_suspend()
4063 rc = ap->ops->port_suspend(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4065 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4068 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4070 ap->pflags &= ~ATA_PFLAG_PM_PENDING; in ata_eh_handle_port_suspend()
4072 ap->pflags |= ATA_PFLAG_SUSPENDED; in ata_eh_handle_port_suspend()
4076 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4082 * ata_eh_handle_port_resume - perform port resume operation
4097 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4098 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_resume()
4099 !(ap->pm_mesg.event & PM_EVENT_RESUME)) { in ata_eh_handle_port_resume()
4100 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4103 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4105 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); in ata_eh_handle_port_resume()
4115 ata_for_each_dev(dev, link, ALL) in ata_eh_handle_port_resume()
4116 ata_ering_clear(&dev->ering); in ata_eh_handle_port_resume()
4118 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_resume()
4120 if (ap->ops->port_resume) in ata_eh_handle_port_resume()
4121 ap->ops->port_resume(ap); in ata_eh_handle_port_resume()
4127 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4128 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_resume()
4129 ap->pflags |= ATA_PFLAG_RESUMING; in ata_eh_handle_port_resume()
4130 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()