Lines matching: retry, all, errors

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-eh.c - libata error handling
8 * as Documentation/driver-api/libata.rst
11 * http://www.sata-io.org/
59 /* Waiting in ->prereset can never be reliable. It's
76 * hardreset. All others are hardreset if available. In most cases
97 15000, /* Some drives are slow to read log pages when waking-up */
125 * On the retry after a command timed out, the next timeout value from
129 * ehc->cmd_timeout_idx keeps track of which timeout to use per
172 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, in __ata_ehi_pushv_desc()
173 ATA_EH_DESC_LEN - ehi->desc_len, in __ata_ehi_pushv_desc()
178 * __ata_ehi_push_desc - push error description without adding separator
182 * Format string according to @fmt and append it to @ehi->desc.
198 * ata_ehi_push_desc - push error description with separator
202 * Format string according to @fmt and append it to @ehi->desc.
203 * If @ehi->desc is not empty, ", " is added in-between.
212 if (ehi->desc_len) in ata_ehi_push_desc()
222 * ata_ehi_clear_desc - clean error description
225 * Clear @ehi->desc.
232 ehi->desc[0] = '\0'; in ata_ehi_clear_desc()
233 ehi->desc_len = 0; in ata_ehi_clear_desc()
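
__ata_ehi_push_desc()/ata_ehi_push_desc() boil down to appending printf-style text into a fixed buffer, inserting ", " when the buffer already holds something, and ata_ehi_clear_desc() resets it. A userspace approximation with a made-up buffer size and names, assuming clamping roughly like the kernel's vscnprintf():

#include <stdarg.h>
#include <stdio.h>

#define DEMO_DESC_LEN 80   /* stand-in for ATA_EH_DESC_LEN */

struct demo_ehi {
	char desc[DEMO_DESC_LEN];
	int desc_len;
};

/* Append formatted text; clamp so we never index past the buffer. */
static void demo_append(struct demo_ehi *ehi, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(ehi->desc + ehi->desc_len,
		      DEMO_DESC_LEN - ehi->desc_len, fmt, args);
	va_end(args);

	if (n > 0)
		ehi->desc_len += n;
	if (ehi->desc_len > DEMO_DESC_LEN - 1)
		ehi->desc_len = DEMO_DESC_LEN - 1;
}

/* Push a description, adding ", " between entries like ata_ehi_push_desc(). */
static void demo_push_desc(struct demo_ehi *ehi, const char *entry)
{
	if (ehi->desc_len)
		demo_append(ehi, ", ");
	demo_append(ehi, "%s", entry);
}

static void demo_clear_desc(struct demo_ehi *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

int main(void)
{
	struct demo_ehi ehi = { .desc = "", .desc_len = 0 };

	demo_push_desc(&ehi, "hard resetting link");
	demo_push_desc(&ehi, "irq_stat 0x40000001");
	printf("%s\n", ehi.desc);   /* hard resetting link, irq_stat 0x40000001 */
	demo_clear_desc(&ehi);
	return 0;
}
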
238 * ata_port_desc - append port description
244 * in-between. This function is to be used while initializing
254 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); in ata_port_desc()
256 if (ap->link.eh_info.desc_len) in ata_port_desc()
257 __ata_ehi_push_desc(&ap->link.eh_info, " "); in ata_port_desc()
260 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); in ata_port_desc()
267 * ata_port_pbar_desc - append PCI BAR description
284 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in ata_port_pbar_desc()
317 return -1; in ata_lookup_timeout_table()
321 * ata_internal_cmd_timeout - determine timeout for an internal command
335 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timeout()
342 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timeout()
347 * ata_internal_cmd_timed_out - notification for internal command timeout
360 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timed_out()
367 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timed_out()
369 ehc->cmd_timeout_idx[dev->devno][ent]++; in ata_internal_cmd_timed_out()
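
The comment at lines 125-129 and the two helpers here describe a simple escalation scheme: each device keeps an index into a table of increasingly generous timeouts, and every timeout bumps the index so the next attempt waits longer. A standalone sketch of that idea; the table values, struct and names are illustrative, not the kernel's ata_eh_cmd_timeout_table:

#include <stdio.h>

/* Hypothetical escalating timeouts in ms. */
static const unsigned long demo_timeouts[] = { 5000, 10000, 30000, 60000 };
#define DEMO_NR_TIMEOUTS (sizeof(demo_timeouts) / sizeof(demo_timeouts[0]))

struct demo_eh_context {
	unsigned int cmd_timeout_idx;   /* per device and command class in the kernel */
};

/* Pick the timeout for the next try of an internal command. */
static unsigned long demo_cmd_timeout(struct demo_eh_context *ehc)
{
	return demo_timeouts[ehc->cmd_timeout_idx];
}

/* Called when the command timed out: move to the next (longer) timeout. */
static void demo_cmd_timed_out(struct demo_eh_context *ehc)
{
	if (ehc->cmd_timeout_idx < DEMO_NR_TIMEOUTS - 1)
		ehc->cmd_timeout_idx++;
}

int main(void)
{
	struct demo_eh_context ehc = { 0 };

	for (int try = 0; try < 5; try++) {
		printf("try %d: timeout %lu ms\n", try, demo_cmd_timeout(&ehc));
		demo_cmd_timed_out(&ehc);   /* pretend every try timed out */
	}
	return 0;
}
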
379 ering->cursor++; in ata_ering_record()
380 ering->cursor %= ATA_ERING_SIZE; in ata_ering_record()
382 ent = &ering->ring[ering->cursor]; in ata_ering_record()
383 ent->eflags = eflags; in ata_ering_record()
384 ent->err_mask = err_mask; in ata_ering_record()
385 ent->timestamp = get_jiffies_64(); in ata_ering_record()
390 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; in ata_ering_top()
392 if (ent->err_mask) in ata_ering_top()
404 idx = ering->cursor; in ata_ering_map()
406 ent = &ering->ring[idx]; in ata_ering_map()
407 if (!ent->err_mask) in ata_ering_map()
412 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; in ata_ering_map()
413 } while (idx != ering->cursor); in ata_ering_map()
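
ata_ering_record() and ata_ering_map() implement a small ring of recent errors: record() advances a cursor through a fixed array, and map() walks backwards from the newest entry, stopping at an empty slot or after one full lap. A self-contained model with the entry fields and ring size trimmed down:

#include <stdio.h>

#define DEMO_ERING_SIZE 8   /* stand-in for ATA_ERING_SIZE */

struct demo_ent {
	unsigned int err_mask;      /* 0 means "slot unused" */
	unsigned long timestamp;
};

struct demo_ering {
	int cursor;
	struct demo_ent ring[DEMO_ERING_SIZE];
};

static void demo_record(struct demo_ering *e, unsigned int err_mask,
			unsigned long now)
{
	e->cursor = (e->cursor + 1) % DEMO_ERING_SIZE;
	e->ring[e->cursor].err_mask = err_mask;
	e->ring[e->cursor].timestamp = now;
}

/* Visit entries newest-first; stop if the callback returns non-zero. */
static int demo_map(struct demo_ering *e,
		    int (*fn)(struct demo_ent *, void *), void *arg)
{
	int idx = e->cursor, rc = 0;

	do {
		struct demo_ent *ent = &e->ring[idx];

		if (!ent->err_mask)
			break;                  /* ran out of recorded errors */
		rc = fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + DEMO_ERING_SIZE) % DEMO_ERING_SIZE;
	} while (idx != e->cursor);

	return rc;
}

static int demo_count(struct demo_ent *ent, void *arg)
{
	(void)ent;
	(*(int *)arg)++;
	return 0;
}

int main(void)
{
	struct demo_ering ering = { 0 };
	int n = 0;

	for (unsigned long t = 1; t <= 3; t++)
		demo_record(&ering, 0x4 /* some err_mask */, t);
	demo_map(&ering, demo_count, &n);
	printf("%d errors recorded\n", n);   /* 3 */
	return 0;
}
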
420 ent->eflags |= ATA_EFLAG_OLD_ER; in ata_ering_clear_cb()
431 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_dev_action()
433 return ehc->i.action | ehc->i.dev_action[dev->devno]; in ata_eh_dev_action()
442 ehi->action &= ~action; in ata_eh_clear_action()
443 ata_for_each_dev(tdev, link, ALL) in ata_eh_clear_action()
444 ehi->dev_action[tdev->devno] &= ~action; in ata_eh_clear_action()
446 /* doesn't make sense for port-wide EH actions */ in ata_eh_clear_action()
449 /* break ehi->action into ehi->dev_action */ in ata_eh_clear_action()
450 if (ehi->action & action) { in ata_eh_clear_action()
451 ata_for_each_dev(tdev, link, ALL) in ata_eh_clear_action()
452 ehi->dev_action[tdev->devno] |= in ata_eh_clear_action()
453 ehi->action & action; in ata_eh_clear_action()
454 ehi->action &= ~action; in ata_eh_clear_action()
457 /* turn off the specified per-dev action */ in ata_eh_clear_action()
458 ehi->dev_action[dev->devno] &= ~action; in ata_eh_clear_action()
463 * ata_eh_acquire - acquire EH ownership
475 mutex_lock(&ap->host->eh_mutex); in ata_eh_acquire()
476 WARN_ON_ONCE(ap->host->eh_owner); in ata_eh_acquire()
477 ap->host->eh_owner = current; in ata_eh_acquire()
481 * ata_eh_release - release EH ownership
492 WARN_ON_ONCE(ap->host->eh_owner != current); in ata_eh_release()
493 ap->host->eh_owner = NULL; in ata_eh_release()
494 mutex_unlock(&ap->host->eh_mutex); in ata_eh_release()
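
ata_eh_acquire()/ata_eh_release() are a plain mutex plus an owner field used only for sanity checks. A userspace analogue with pthreads; the struct and the eh_owned flag are inventions of this sketch:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct demo_host {
	pthread_mutex_t eh_mutex;
	pthread_t eh_owner;
	int eh_owned;                 /* the kernel checks eh_owner against NULL */
};

static void demo_eh_acquire(struct demo_host *host)
{
	pthread_mutex_lock(&host->eh_mutex);
	assert(!host->eh_owned);      /* mirrors WARN_ON_ONCE(ap->host->eh_owner) */
	host->eh_owner = pthread_self();
	host->eh_owned = 1;
}

static void demo_eh_release(struct demo_host *host)
{
	assert(host->eh_owned &&
	       pthread_equal(host->eh_owner, pthread_self()));
	host->eh_owned = 0;
	pthread_mutex_unlock(&host->eh_mutex);
}

int main(void)
{
	struct demo_host host = { .eh_mutex = PTHREAD_MUTEX_INITIALIZER };

	demo_eh_acquire(&host);
	printf("EH owned by this thread\n");
	demo_eh_release(&host);
	return 0;
}
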
501 dev->class++; in ata_eh_dev_disable()
506 ata_ering_clear(&dev->ering); in ata_eh_dev_disable()
516 * Unless we are restarting, transition all enabled devices to in ata_eh_unload()
531 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); in ata_eh_unload()
537 spin_lock_irqsave(ap->lock, flags); in ata_eh_unload()
540 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ in ata_eh_unload()
541 ap->pflags |= ATA_PFLAG_UNLOADED; in ata_eh_unload()
543 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_unload()
547 * ata_scsi_error - SCSI layer error handler callback
550 * Handles SCSI-layer-thrown error events.
564 spin_lock_irqsave(host->host_lock, flags); in ata_scsi_error()
565 list_splice_init(&host->eh_cmd_q, &eh_work_q); in ata_scsi_error()
566 spin_unlock_irqrestore(host->host_lock, flags); in ata_scsi_error()
574 /* finish or retry handled scmd's and clean up */ in ata_scsi_error()
580 * ata_scsi_cmd_error_handler - error callback for a list of commands
586 * ap->eh_done_q. This function is the first part of the libata error
603 * For EH, all qcs are finished in one of three ways - in ata_scsi_cmd_error_handler()
615 spin_lock_irqsave(ap->lock, flags); in ata_scsi_cmd_error_handler()
618 * This must occur under the ap->lock as we don't want in ata_scsi_cmd_error_handler()
622 * non-notified command and completes much like an IRQ handler. in ata_scsi_cmd_error_handler()
627 if (ap->ops->lost_interrupt) in ata_scsi_cmd_error_handler()
628 ap->ops->lost_interrupt(ap); in ata_scsi_cmd_error_handler()
634 if (qc->flags & ATA_QCFLAG_ACTIVE && in ata_scsi_cmd_error_handler()
635 qc->scsicmd == scmd) in ata_scsi_cmd_error_handler()
641 if (!(qc->flags & ATA_QCFLAG_EH)) { in ata_scsi_cmd_error_handler()
643 qc->err_mask |= AC_ERR_TIMEOUT; in ata_scsi_cmd_error_handler()
644 qc->flags |= ATA_QCFLAG_EH; in ata_scsi_cmd_error_handler()
652 scmd->retries = scmd->allowed; in ata_scsi_cmd_error_handler()
653 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in ata_scsi_cmd_error_handler()
668 ap->eh_tries = ATA_EH_MAX_TRIES; in ata_scsi_cmd_error_handler()
670 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_cmd_error_handler()
675 * ata_scsi_port_error_handler - recover the port after the commands
679 * Handle the recovery of the port @ap after all the commands
691 del_timer_sync(&ap->fastdrain_timer); in ata_scsi_port_error_handler()
697 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
700 struct ata_eh_context *ehc = &link->eh_context; in ata_scsi_port_error_handler()
703 memset(&link->eh_context, 0, sizeof(link->eh_context)); in ata_scsi_port_error_handler()
704 link->eh_context.i = link->eh_info; in ata_scsi_port_error_handler()
705 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
708 int devno = dev->devno; in ata_scsi_port_error_handler()
710 ehc->saved_xfer_mode[devno] = dev->xfer_mode; in ata_scsi_port_error_handler()
712 ehc->saved_ncq_enabled |= 1 << devno; in ata_scsi_port_error_handler()
715 if (ap->pflags & ATA_PFLAG_RESUMING) in ata_scsi_port_error_handler()
716 ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE; in ata_scsi_port_error_handler()
720 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
721 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
722 ap->excl_link = NULL; /* don't maintain exclusion over EH */ in ata_scsi_port_error_handler()
724 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
727 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) in ata_scsi_port_error_handler()
728 ap->ops->error_handler(ap); in ata_scsi_port_error_handler()
731 if ((ap->pflags & ATA_PFLAG_UNLOADING) && in ata_scsi_port_error_handler()
732 !(ap->pflags & ATA_PFLAG_UNLOADED)) in ata_scsi_port_error_handler()
741 * Exception might have happened after ->error_handler recovered the in ata_scsi_port_error_handler()
744 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
746 if (ap->pflags & ATA_PFLAG_EH_PENDING) { in ata_scsi_port_error_handler()
747 if (--ap->eh_tries) { in ata_scsi_port_error_handler()
748 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
754 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
759 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
762 * end eh (clear host_eh_scheduled) while holding ap->lock such that if in ata_scsi_port_error_handler()
764 * midlayer will re-initiate EH. in ata_scsi_port_error_handler()
766 ap->ops->end_eh(ap); in ata_scsi_port_error_handler()
768 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
771 scsi_eh_flush_done_q(&ap->eh_done_q); in ata_scsi_port_error_handler()
774 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
776 ap->pflags &= ~ATA_PFLAG_RESUMING; in ata_scsi_port_error_handler()
778 if (ap->pflags & ATA_PFLAG_LOADING) in ata_scsi_port_error_handler()
779 ap->pflags &= ~ATA_PFLAG_LOADING; in ata_scsi_port_error_handler()
780 else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && in ata_scsi_port_error_handler()
781 !(ap->flags & ATA_FLAG_SAS_HOST)) in ata_scsi_port_error_handler()
782 schedule_delayed_work(&ap->hotplug_task, 0); in ata_scsi_port_error_handler()
784 if (ap->pflags & ATA_PFLAG_RECOVERED) in ata_scsi_port_error_handler()
787 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); in ata_scsi_port_error_handler()
790 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
791 wake_up_all(&ap->eh_wait_q); in ata_scsi_port_error_handler()
793 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
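
Stripped of locking and the actual recovery work, ata_scsi_port_error_handler() is a bounded repeat-while-new-errors loop: run recovery, and if more errors arrived in the meantime, go around again until ATA_EH_MAX_TRIES is exhausted. The sketch below models only that loop; invoke_recovery() and the counters are placeholders:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_EH_MAX_TRIES 5          /* stand-in for ATA_EH_MAX_TRIES */

static int new_errors_during_recovery = 2;   /* pretend EH is re-triggered twice */

/* Placeholder for snapshotting eh_info and running ->error_handler(). */
static bool demo_invoke_recovery(void)
{
	if (new_errors_during_recovery > 0) {
		new_errors_during_recovery--;
		return true;             /* more errors arrived: EH pending again */
	}
	return false;
}

int main(void)
{
	int tries = DEMO_EH_MAX_TRIES;

	for (;;) {
		bool eh_pending = demo_invoke_recovery();

		if (!eh_pending)
			break;           /* recovery complete */
		if (--tries == 0) {
			fprintf(stderr, "EH pending after %d tries, giving up\n",
				DEMO_EH_MAX_TRIES);
			break;
		}
		printf("new errors during recovery, repeating EH (%d tries left)\n",
		       tries);
	}
	printf("EH complete\n");
	return 0;
}
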
798 * ata_port_wait_eh - Wait for the currently pending EH to complete
811 retry: in ata_port_wait_eh()
812 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
814 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { in ata_port_wait_eh()
815 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); in ata_port_wait_eh()
816 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
818 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
820 finish_wait(&ap->eh_wait_q, &wait); in ata_port_wait_eh()
822 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
825 if (scsi_host_in_recovery(ap->scsi_host)) { in ata_port_wait_eh()
827 goto retry; in ata_port_wait_eh()
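
ata_port_wait_eh() is a standard sleep-until-flags-clear loop. In userspace the same shape falls out of a mutex plus condition variable; the flag names mirror the pflags bits, everything else is made up:

#include <pthread.h>
#include <stdio.h>

#define DEMO_EH_PENDING      (1 << 0)
#define DEMO_EH_IN_PROGRESS  (1 << 1)

struct demo_port {
	pthread_mutex_t lock;
	pthread_cond_t eh_wait_q;    /* counterpart of ap->eh_wait_q */
	unsigned int pflags;
};

/* Block until neither EH_PENDING nor EH_IN_PROGRESS is set. */
static void demo_port_wait_eh(struct demo_port *ap)
{
	pthread_mutex_lock(&ap->lock);
	while (ap->pflags & (DEMO_EH_PENDING | DEMO_EH_IN_PROGRESS))
		pthread_cond_wait(&ap->eh_wait_q, &ap->lock);
	pthread_mutex_unlock(&ap->lock);
}

/* Called by the EH thread when recovery finishes (wake_up_all analogue). */
static void demo_port_eh_done(struct demo_port *ap)
{
	pthread_mutex_lock(&ap->lock);
	ap->pflags &= ~(DEMO_EH_PENDING | DEMO_EH_IN_PROGRESS);
	pthread_cond_broadcast(&ap->eh_wait_q);
	pthread_mutex_unlock(&ap->lock);
}

int main(void)
{
	struct demo_port ap = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.eh_wait_q = PTHREAD_COND_INITIALIZER,
		.pflags = 0,                 /* nothing pending in this demo */
	};

	demo_port_wait_eh(&ap);
	demo_port_eh_done(&ap);          /* harmless when nothing was pending */
	printf("EH idle\n");
	return 0;
}
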
838 /* count only non-internal commands */ in ata_eh_nr_in_flight()
853 spin_lock_irqsave(ap->lock, flags); in ata_eh_fastdrain_timerfn()
861 if (cnt == ap->fastdrain_cnt) { in ata_eh_fastdrain_timerfn()
865 /* No progress during the last interval, tag all in ata_eh_fastdrain_timerfn()
866 * in-flight qcs as timed out and freeze the port. in ata_eh_fastdrain_timerfn()
870 qc->err_mask |= AC_ERR_TIMEOUT; in ata_eh_fastdrain_timerfn()
876 ap->fastdrain_cnt = cnt; in ata_eh_fastdrain_timerfn()
877 ap->fastdrain_timer.expires = in ata_eh_fastdrain_timerfn()
879 add_timer(&ap->fastdrain_timer); in ata_eh_fastdrain_timerfn()
883 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_fastdrain_timerfn()
887 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
892 * is non-zero and EH wasn't pending before. Fast drain ensures
903 if (ap->pflags & ATA_PFLAG_EH_PENDING) in ata_eh_set_pending()
906 ap->pflags |= ATA_PFLAG_EH_PENDING; in ata_eh_set_pending()
911 /* do we have in-flight qcs? */ in ata_eh_set_pending()
917 ap->fastdrain_cnt = cnt; in ata_eh_set_pending()
918 ap->fastdrain_timer.expires = in ata_eh_set_pending()
920 add_timer(&ap->fastdrain_timer); in ata_eh_set_pending()
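
The fast-drain machinery reduces to comparing in-flight command counts across an interval: no change means the port is stuck, so everything gets timed out and the port frozen; any progress re-arms the timer; zero means the drain finished. A sketch of just that decision, with no real timers and placeholder names:

#include <stdio.h>

enum demo_action { DEMO_REARM_TIMER, DEMO_FREEZE_PORT, DEMO_DRAINED };

/*
 * Called on each fast-drain interval: @prev is the in-flight count recorded
 * when the timer was armed, @cur the count now.
 */
static enum demo_action demo_fastdrain_tick(unsigned int prev, unsigned int cur)
{
	if (!cur)
		return DEMO_DRAINED;        /* all commands completed */
	if (cur == prev)
		return DEMO_FREEZE_PORT;    /* no progress: time out everything */
	return DEMO_REARM_TIMER;            /* some progress: wait another interval */
}

int main(void)
{
	printf("%d\n", demo_fastdrain_tick(4, 2));   /* progress -> re-arm */
	printf("%d\n", demo_fastdrain_tick(2, 2));   /* stuck -> freeze */
	printf("%d\n", demo_fastdrain_tick(2, 0));   /* drained */
	return 0;
}
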
924 * ata_qc_schedule_eh - schedule qc for error handling
935 struct ata_port *ap = qc->ap; in ata_qc_schedule_eh()
937 qc->flags |= ATA_QCFLAG_EH; in ata_qc_schedule_eh()
945 blk_abort_request(scsi_cmd_to_rq(qc->scsicmd)); in ata_qc_schedule_eh()
949 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
957 if (ap->pflags & ATA_PFLAG_INITIALIZING) in ata_std_sched_eh()
961 scsi_schedule_eh(ap->scsi_host); in ata_std_sched_eh()
968 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
972 * shost, so host fields can be directly manipulated under ap->lock, in
973 * the libsas case we need to hold a lock at the ha->level to coordinate
981 struct Scsi_Host *host = ap->scsi_host; in ata_std_end_eh()
983 host->host_eh_scheduled = 0; in ata_std_end_eh()
989 * ata_port_schedule_eh - schedule error handling without a qc
993 * all commands are drained.
1001 ap->ops->sched_eh(ap); in ata_port_schedule_eh()
1010 /* we're gonna abort all commands, no need for fast drain */ in ata_do_link_abort()
1015 if (qc && (!link || qc->dev->link == link)) { in ata_do_link_abort()
1016 qc->flags |= ATA_QCFLAG_EH; in ata_do_link_abort()
1029 * ata_link_abort - abort all qc's on the link
1032 * Abort all active qc's active on @link and schedule EH.
1042 return ata_do_link_abort(link->ap, link); in ata_link_abort()
1047 * ata_port_abort - abort all qc's on the port
1050 * Abort all active qc's of @ap and schedule EH.
1065 * __ata_port_freeze - freeze port
1073 * ap->ops->freeze() callback can be used for freezing the port
1074 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1075 * port cannot be frozen hardware-wise, the interrupt handler
1084 if (ap->ops->freeze) in __ata_port_freeze()
1085 ap->ops->freeze(ap); in __ata_port_freeze()
1087 ap->pflags |= ATA_PFLAG_FROZEN; in __ata_port_freeze()
1093 * ata_port_freeze - abort & freeze port
1115 * ata_eh_freeze_port - EH helper to freeze port
1127 spin_lock_irqsave(ap->lock, flags); in ata_eh_freeze_port()
1129 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_freeze_port()
1134 * ata_eh_thaw_port - EH helper to thaw port
1146 spin_lock_irqsave(ap->lock, flags); in ata_eh_thaw_port()
1148 ap->pflags &= ~ATA_PFLAG_FROZEN; in ata_eh_thaw_port()
1150 if (ap->ops->thaw) in ata_eh_thaw_port()
1151 ap->ops->thaw(ap); in ata_eh_thaw_port()
1153 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_thaw_port()
1165 struct ata_port *ap = qc->ap; in __ata_eh_qc_complete()
1166 struct scsi_cmnd *scmd = qc->scsicmd; in __ata_eh_qc_complete()
1169 spin_lock_irqsave(ap->lock, flags); in __ata_eh_qc_complete()
1170 qc->scsidone = ata_eh_scsidone; in __ata_eh_qc_complete()
1172 WARN_ON(ata_tag_valid(qc->tag)); in __ata_eh_qc_complete()
1173 spin_unlock_irqrestore(ap->lock, flags); in __ata_eh_qc_complete()
1175 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in __ata_eh_qc_complete()
1179 * ata_eh_qc_complete - Complete an active ATA command from EH
1187 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_complete()
1188 scmd->retries = scmd->allowed; in ata_eh_qc_complete()
1193 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1194 * @qc: Command to retry
1199 * SCSI midlayer limits the number of retries to scmd->allowed.
1200 * scmd->allowed is incremented for commands which get retried
1201 * due to unrelated failures (qc->err_mask is zero).
1205 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_retry()
1206 if (!qc->err_mask) in ata_eh_qc_retry()
1207 scmd->allowed++; in ata_eh_qc_retry()
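
The allowed++ above means a retry caused by somebody else's failure does not eat into the command's own retry budget. A toy model; the retries/allowed fields mimic struct scsi_cmnd, the rest is invented:

#include <stdbool.h>
#include <stdio.h>

struct demo_scmd {
	int retries;    /* how many retries have been used */
	int allowed;    /* how many retries the midlayer permits */
};

/* Retry @scmd; @own_fault mirrors "qc->err_mask != 0". */
static void demo_eh_qc_retry(struct demo_scmd *scmd, bool own_fault)
{
	if (!own_fault)
		scmd->allowed++;   /* unrelated failure: don't charge this command */
	/* requeue would happen here */
}

int main(void)
{
	struct demo_scmd scmd = { .retries = 0, .allowed = 3 };

	demo_eh_qc_retry(&scmd, false);   /* innocent bystander of a port reset */
	demo_eh_qc_retry(&scmd, true);    /* this command actually failed */
	printf("allowed is now %d (was 3)\n", scmd.allowed);   /* 4 */
	return 0;
}
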
1212 * ata_dev_disable - disable ATA device
1232 * ata_eh_detach_dev - detach ATA device
1242 struct ata_link *link = dev->link; in ata_eh_detach_dev()
1243 struct ata_port *ap = link->ap; in ata_eh_detach_dev()
1244 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_detach_dev()
1256 spin_lock_irqsave(ap->lock, flags); in ata_eh_detach_dev()
1258 dev->flags &= ~ATA_DFLAG_DETACH; in ata_eh_detach_dev()
1261 dev->flags |= ATA_DFLAG_DETACHED; in ata_eh_detach_dev()
1262 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_detach_dev()
1265 /* clear per-dev EH info */ in ata_eh_detach_dev()
1266 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1267 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1268 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_detach_dev()
1269 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_detach_dev()
1271 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_detach_dev()
1275 * ata_eh_about_to_do - about to perform eh_action
1277 * @dev: target ATA dev for per-dev action (can be NULL)
1281 * in @link->eh_info such that eh actions are not unnecessarily
1290 struct ata_port *ap = link->ap; in ata_eh_about_to_do()
1291 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_about_to_do()
1292 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_about_to_do()
1295 trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action); in ata_eh_about_to_do()
1297 spin_lock_irqsave(ap->lock, flags); in ata_eh_about_to_do()
1304 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) in ata_eh_about_to_do()
1305 ap->pflags |= ATA_PFLAG_RECOVERED; in ata_eh_about_to_do()
1307 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_about_to_do()
1311 * ata_eh_done - EH action complete
1313 * @dev: target ATA dev for per-dev action (can be NULL)
1317 * in @link->eh_context.
1325 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_done()
1327 trace_ata_eh_done(link, dev ? dev->devno : 0, action); in ata_eh_done()
1329 ata_eh_clear_action(link, dev, &ehc->i, action); in ata_eh_done()
1333 * ata_err_string - convert err_mask to descriptive string
1336 * Convert @err_mask to descriptive string. Errors are
1372 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1403 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1417 struct scsi_cmnd *cmd = qc->scsicmd; in ata_eh_request_sense()
1418 struct ata_device *dev = qc->dev; in ata_eh_request_sense()
1422 if (ata_port_is_frozen(qc->ap)) { in ata_eh_request_sense()
1427 if (!ata_id_sense_reporting_enabled(dev->id)) { in ata_eh_request_sense()
1428 ata_dev_warn(qc->dev, "sense data reporting disabled\n"); in ata_eh_request_sense()
1442 /* Set sense without also setting scsicmd->result */ in ata_eh_request_sense()
1443 scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE, in ata_eh_request_sense()
1444 cmd->sense_buffer, tf.lbah, in ata_eh_request_sense()
1446 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_request_sense()
1458 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1477 struct ata_port *ap = dev->link->ap; in atapi_eh_request_sense()
1483 * for the case where they are -not- overwritten in atapi_eh_request_sense()
1495 if (ap->flags & ATA_FLAG_PIO_DMA) { in atapi_eh_request_sense()
1509 * ata_eh_analyze_serror - analyze SError for a failed port
1520 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_analyze_serror()
1521 u32 serror = ehc->i.serror; in ata_eh_analyze_serror()
1543 if (link->lpm_policy > ATA_LPM_MAX_POWER) in ata_eh_analyze_serror()
1545 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) in ata_eh_analyze_serror()
1551 ata_ehi_hotplugged(&ehc->i); in ata_eh_analyze_serror()
1553 ehc->i.err_mask |= err_mask; in ata_eh_analyze_serror()
1554 ehc->i.action |= action; in ata_eh_analyze_serror()
1558 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1573 const struct ata_taskfile *tf = &qc->result_tf; in ata_eh_analyze_tf()
1575 u8 stat = tf->status, err = tf->error; in ata_eh_analyze_tf()
1578 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1583 qc->err_mask |= AC_ERR_DEV; in ata_eh_analyze_tf()
1594 switch (qc->dev->class) { in ata_eh_analyze_tf()
1599 * -It was a non-NCQ command that failed, or in ata_eh_analyze_tf()
1600 * -It was a NCQ command that failed, but the sense data in ata_eh_analyze_tf()
1604 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) && in ata_eh_analyze_tf()
1606 set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); in ata_eh_analyze_tf()
1608 qc->err_mask |= AC_ERR_ATA_BUS; in ata_eh_analyze_tf()
1610 qc->err_mask |= AC_ERR_MEDIA; in ata_eh_analyze_tf()
1612 qc->err_mask |= AC_ERR_INVALID; in ata_eh_analyze_tf()
1616 if (!ata_port_is_frozen(qc->ap)) { in ata_eh_analyze_tf()
1617 tmp = atapi_eh_request_sense(qc->dev, in ata_eh_analyze_tf()
1618 qc->scsicmd->sense_buffer, in ata_eh_analyze_tf()
1619 qc->result_tf.error >> 4); in ata_eh_analyze_tf()
1621 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_analyze_tf()
1623 qc->err_mask |= tmp; in ata_eh_analyze_tf()
1627 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { in ata_eh_analyze_tf()
1628 enum scsi_disposition ret = scsi_check_sense(qc->scsicmd); in ata_eh_analyze_tf()
1639 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_analyze_tf()
1640 qc->err_mask |= AC_ERR_OTHER; in ata_eh_analyze_tf()
1642 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1645 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) in ata_eh_analyze_tf()
1690 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) in speed_down_verdict_cb()
1691 return -1; in speed_down_verdict_cb()
1693 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, in speed_down_verdict_cb()
1694 &arg->xfer_ok); in speed_down_verdict_cb()
1695 arg->nr_errors[cat]++; in speed_down_verdict_cb()
1701 * ata_eh_speed_down_verdict - Determine speed down verdict
1728 * taken per error. An action triggered by non-DUBIOUS errors
1729 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1734 * DUBIOUS errors.
1736 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1739 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1742 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1745 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1748 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1749 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1766 arg.since = j64 - min(j64, j5mins); in ata_eh_speed_down_verdict()
1767 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
1785 arg.since = j64 - min(j64, j10mins); in ata_eh_speed_down_verdict()
1786 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
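
A simplified rendering of the verdict computation: count ring entries per category that fall inside a time window, then apply thresholds such as rule 5 above. The categories, the 10-minute window and the 3/6 cut-offs come from the comment; everything else is a stand-in:

#include <stdio.h>

enum { DEMO_CAT_ATA_BUS, DEMO_CAT_TOUT_HSM, DEMO_CAT_UNK_DEV, DEMO_NR_CATS };
#define DEMO_SPDN_SPEED_DOWN 0x1

struct demo_err { int cat; unsigned long timestamp; };

/* Count ring entries per category that are newer than @since. */
static void demo_count_errors(const struct demo_err *ring, int n,
			      unsigned long since, int nr_errors[DEMO_NR_CATS])
{
	for (int i = 0; i < n; i++)
		if (ring[i].timestamp >= since)
			nr_errors[ring[i].cat]++;
}

/* Rule 5: >3 ATA_BUS or TOUT_HSM errors, or >6 UNK_DEV errors, in 10 min. */
static unsigned int demo_speed_down_verdict(const struct demo_err *ring, int n,
					    unsigned long now)
{
	const unsigned long ten_mins = 10 * 60;
	int nr_errors[DEMO_NR_CATS] = { 0 };

	demo_count_errors(ring, n, now - (now < ten_mins ? now : ten_mins),
			  nr_errors);

	if (nr_errors[DEMO_CAT_ATA_BUS] + nr_errors[DEMO_CAT_TOUT_HSM] > 3 ||
	    nr_errors[DEMO_CAT_UNK_DEV] > 6)
		return DEMO_SPDN_SPEED_DOWN;
	return 0;
}

int main(void)
{
	struct demo_err ring[] = {
		{ DEMO_CAT_ATA_BUS,  950 }, { DEMO_CAT_TOUT_HSM, 960 },
		{ DEMO_CAT_ATA_BUS,  980 }, { DEMO_CAT_TOUT_HSM, 990 },
	};

	printf("verdict: %#x\n",
	       demo_speed_down_verdict(ring, 4, 1000));   /* 4 errors -> 0x1 */
	return 0;
}
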
1801 * ata_eh_speed_down - record error and speed down if necessary
1825 /* don't bother if Cat-0 error */ in ata_eh_speed_down()
1830 ata_ering_record(&dev->ering, eflags, err_mask); in ata_eh_speed_down()
1835 dev->flags |= ATA_DFLAG_NCQ_OFF; in ata_eh_speed_down()
1836 ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); in ata_eh_speed_down()
1849 if (dev->spdn_cnt < 2) { in ata_eh_speed_down()
1856 if (dev->xfer_shift != ATA_SHIFT_PIO) in ata_eh_speed_down()
1857 sel = dma_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1859 sel = pio_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1861 dev->spdn_cnt++; in ata_eh_speed_down()
1873 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && in ata_eh_speed_down()
1874 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && in ata_eh_speed_down()
1875 (dev->xfer_shift != ATA_SHIFT_PIO)) { in ata_eh_speed_down()
1877 dev->spdn_cnt = 0; in ata_eh_speed_down()
1887 ata_ering_clear(&dev->ering); in ata_eh_speed_down()
1892 * ata_eh_worth_retry - analyze error and decide whether to retry
1893 * @qc: qc to possibly retry
1895 * Look at the cause of the error and decide if a retry
1896 * might be useful or not. We don't want to retry media errors
1897 * because the drive itself has probably already taken 10-30 seconds
1902 if (qc->err_mask & AC_ERR_MEDIA) in ata_eh_worth_retry()
1903 return 0; /* don't retry media errors */ in ata_eh_worth_retry()
1904 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_worth_retry()
1905 return 1; /* otherwise retry anything from fs stack */ in ata_eh_worth_retry()
1906 if (qc->err_mask & AC_ERR_INVALID) in ata_eh_worth_retry()
1907 return 0; /* don't retry these */ in ata_eh_worth_retry()
1908 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ in ata_eh_worth_retry()
1912 * ata_eh_quiet - check if we need to be quiet about a command error
1920 if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET) in ata_eh_quiet()
1921 qc->flags |= ATA_QCFLAG_QUIET; in ata_eh_quiet()
1922 return qc->flags & ATA_QCFLAG_QUIET; in ata_eh_quiet()
1927 struct ata_port *ap = link->ap; in ata_eh_read_sense_success_non_ncq()
1930 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_read_sense_success_non_ncq()
1932 return -EIO; in ata_eh_read_sense_success_non_ncq()
1934 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_read_sense_success_non_ncq()
1935 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_read_sense_success_non_ncq()
1936 qc->err_mask) in ata_eh_read_sense_success_non_ncq()
1937 return -EIO; in ata_eh_read_sense_success_non_ncq()
1940 return -EIO; in ata_eh_read_sense_success_non_ncq()
1947 scsi_check_sense(qc->scsicmd); in ata_eh_read_sense_success_non_ncq()
1954 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_get_success_sense()
1955 struct ata_device *dev = link->device; in ata_eh_get_success_sense()
1956 struct ata_port *ap = link->ap; in ata_eh_get_success_sense()
1960 if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE)) in ata_eh_get_success_sense()
1973 * data. Otherwise, we are dealing with a non-NCQ command and use in ata_eh_get_success_sense()
1976 if (link->sactive) in ata_eh_get_success_sense()
1997 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_get_success_sense()
1998 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_get_success_sense()
1999 qc->err_mask || in ata_eh_get_success_sense()
2000 ata_dev_phys_link(qc->dev) != link) in ata_eh_get_success_sense()
2004 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_get_success_sense()
2008 if (!(qc->result_tf.status & ATA_SENSE)) in ata_eh_get_success_sense()
2012 ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0); in ata_eh_get_success_sense()
2013 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_get_success_sense()
2019 * ata_eh_link_autopsy - analyze error and determine recovery action
2031 struct ata_port *ap = link->ap; in ata_eh_link_autopsy()
2032 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_autopsy()
2040 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) in ata_eh_link_autopsy()
2046 ehc->i.serror |= serror; in ata_eh_link_autopsy()
2048 } else if (rc != -EOPNOTSUPP) { in ata_eh_link_autopsy()
2050 ehc->i.probe_mask |= ATA_ALL_DEVICES; in ata_eh_link_autopsy()
2051 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2052 ehc->i.err_mask |= AC_ERR_OTHER; in ata_eh_link_autopsy()
2067 if (ehc->i.err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2068 ehc->i.err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2070 all_err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2073 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_autopsy()
2074 qc->flags & ATA_QCFLAG_RETRY || in ata_eh_link_autopsy()
2075 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD || in ata_eh_link_autopsy()
2076 ata_dev_phys_link(qc->dev) != link) in ata_eh_link_autopsy()
2080 qc->err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2083 ehc->i.action |= ata_eh_analyze_tf(qc); in ata_eh_link_autopsy()
2085 /* DEV errors are probably spurious in case of ATA_BUS error */ in ata_eh_link_autopsy()
2086 if (qc->err_mask & AC_ERR_ATA_BUS) in ata_eh_link_autopsy()
2087 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | in ata_eh_link_autopsy()
2091 if (qc->err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2092 qc->err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2101 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_link_autopsy()
2102 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); in ata_eh_link_autopsy()
2104 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_link_autopsy()
2107 ehc->i.dev = qc->dev; in ata_eh_link_autopsy()
2108 all_err_mask |= qc->err_mask; in ata_eh_link_autopsy()
2109 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_link_autopsy()
2113 /* Count quiet errors */ in ata_eh_link_autopsy()
2119 /* If all failed commands requested silence, then be quiet */ in ata_eh_link_autopsy()
2121 ehc->i.flags |= ATA_EHI_QUIET; in ata_eh_link_autopsy()
2126 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2129 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_link_autopsy()
2132 * perform per-dev EH action only on the offending device. in ata_eh_link_autopsy()
2134 if (ehc->i.dev) { in ata_eh_link_autopsy()
2135 ehc->i.dev_action[ehc->i.dev->devno] |= in ata_eh_link_autopsy()
2136 ehc->i.action & ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2137 ehc->i.action &= ~ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2142 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; in ata_eh_link_autopsy()
2145 dev = ehc->i.dev; in ata_eh_link_autopsy()
2147 ata_dev_enabled(link->device)))) in ata_eh_link_autopsy()
2148 dev = link->device; in ata_eh_link_autopsy()
2151 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) in ata_eh_link_autopsy()
2153 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); in ata_eh_link_autopsy()
2154 trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); in ata_eh_link_autopsy()
2159 * ata_eh_autopsy - analyze error and determine recovery action
2162 * Analyze all links of @ap and determine why they failed and
2179 if (ap->slave_link) { in ata_eh_autopsy()
2180 struct ata_eh_context *mehc = &ap->link.eh_context; in ata_eh_autopsy()
2181 struct ata_eh_context *sehc = &ap->slave_link->eh_context; in ata_eh_autopsy()
2184 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; in ata_eh_autopsy()
2187 ata_eh_link_autopsy(ap->slave_link); in ata_eh_autopsy()
2190 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2191 mehc->i.action |= sehc->i.action; in ata_eh_autopsy()
2192 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; in ata_eh_autopsy()
2193 mehc->i.flags |= sehc->i.flags; in ata_eh_autopsy()
2194 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2201 ata_eh_link_autopsy(&ap->link); in ata_eh_autopsy()
2205 * ata_get_cmd_name - get name for ATA command
2248 { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, in ata_get_cmd_name()
2277 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, in ata_get_cmd_name()
2327 * ata_eh_link_report - report error handling to user
2337 struct ata_port *ap = link->ap; in ata_eh_link_report()
2338 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_report()
2344 if (ehc->i.flags & ATA_EHI_QUIET) in ata_eh_link_report()
2348 if (ehc->i.desc[0] != '\0') in ata_eh_link_report()
2349 desc = ehc->i.desc; in ata_eh_link_report()
2352 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2353 ata_dev_phys_link(qc->dev) != link || in ata_eh_link_report()
2354 ((qc->flags & ATA_QCFLAG_QUIET) && in ata_eh_link_report()
2355 qc->err_mask == AC_ERR_DEV)) in ata_eh_link_report()
2357 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) in ata_eh_link_report()
2363 if (!nr_failed && !ehc->i.err_mask) in ata_eh_link_report()
2370 if (ap->eh_tries < ATA_EH_MAX_TRIES) in ata_eh_link_report()
2372 ap->eh_tries); in ata_eh_link_report()
2374 if (ehc->i.dev) { in ata_eh_link_report()
2375 ata_dev_err(ehc->i.dev, "exception Emask 0x%x " in ata_eh_link_report()
2377 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2378 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2380 ata_dev_err(ehc->i.dev, "%s\n", desc); in ata_eh_link_report()
2384 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2385 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2391 if (ehc->i.serror) in ata_eh_link_report()
2394 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", in ata_eh_link_report()
2395 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", in ata_eh_link_report()
2396 ehc->i.serror & SERR_DATA ? "UnrecovData " : "", in ata_eh_link_report()
2397 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", in ata_eh_link_report()
2398 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", in ata_eh_link_report()
2399 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", in ata_eh_link_report()
2400 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", in ata_eh_link_report()
2401 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", in ata_eh_link_report()
2402 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", in ata_eh_link_report()
2403 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", in ata_eh_link_report()
2404 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", in ata_eh_link_report()
2405 ehc->i.serror & SERR_CRC ? "BadCRC " : "", in ata_eh_link_report()
2406 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", in ata_eh_link_report()
2407 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", in ata_eh_link_report()
2408 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", in ata_eh_link_report()
2409 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", in ata_eh_link_report()
2410 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); in ata_eh_link_report()
2414 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; in ata_eh_link_report()
2418 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2419 ata_dev_phys_link(qc->dev) != link || !qc->err_mask) in ata_eh_link_report()
2422 if (qc->dma_dir != DMA_NONE) { in ata_eh_link_report()
2430 switch (qc->tf.protocol) { in ata_eh_link_report()
2460 prot_str, qc->nbytes, dma_str[qc->dma_dir]); in ata_eh_link_report()
2463 if (ata_is_atapi(qc->tf.protocol)) { in ata_eh_link_report()
2464 const u8 *cdb = qc->cdb; in ata_eh_link_report()
2465 size_t cdb_len = qc->dev->cdb_len; in ata_eh_link_report()
2467 if (qc->scsicmd) { in ata_eh_link_report()
2468 cdb = qc->scsicmd->cmnd; in ata_eh_link_report()
2469 cdb_len = qc->scsicmd->cmd_len; in ata_eh_link_report()
2474 ata_dev_err(qc->dev, "failed command: %s\n", in ata_eh_link_report()
2475 ata_get_cmd_name(cmd->command)); in ata_eh_link_report()
2477 ata_dev_err(qc->dev, in ata_eh_link_report()
2482 cmd->command, cmd->feature, cmd->nsect, in ata_eh_link_report()
2483 cmd->lbal, cmd->lbam, cmd->lbah, in ata_eh_link_report()
2484 cmd->hob_feature, cmd->hob_nsect, in ata_eh_link_report()
2485 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, in ata_eh_link_report()
2486 cmd->device, qc->tag, data_buf, cdb_buf, in ata_eh_link_report()
2487 res->status, res->error, res->nsect, in ata_eh_link_report()
2488 res->lbal, res->lbam, res->lbah, in ata_eh_link_report()
2489 res->hob_feature, res->hob_nsect, in ata_eh_link_report()
2490 res->hob_lbal, res->hob_lbam, res->hob_lbah, in ata_eh_link_report()
2491 res->device, qc->err_mask, ata_err_string(qc->err_mask), in ata_eh_link_report()
2492 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); in ata_eh_link_report()
2495 if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | in ata_eh_link_report()
2497 if (res->status & ATA_BUSY) in ata_eh_link_report()
2498 ata_dev_err(qc->dev, "status: { Busy }\n"); in ata_eh_link_report()
2500 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", in ata_eh_link_report()
2501 res->status & ATA_DRDY ? "DRDY " : "", in ata_eh_link_report()
2502 res->status & ATA_DF ? "DF " : "", in ata_eh_link_report()
2503 res->status & ATA_DRQ ? "DRQ " : "", in ata_eh_link_report()
2504 res->status & ATA_SENSE ? "SENSE " : "", in ata_eh_link_report()
2505 res->status & ATA_ERR ? "ERR " : ""); in ata_eh_link_report()
2508 if (cmd->command != ATA_CMD_PACKET && in ata_eh_link_report()
2509 (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | in ata_eh_link_report()
2511 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", in ata_eh_link_report()
2512 res->error & ATA_ICRC ? "ICRC " : "", in ata_eh_link_report()
2513 res->error & ATA_UNC ? "UNC " : "", in ata_eh_link_report()
2514 res->error & ATA_AMNF ? "AMNF " : "", in ata_eh_link_report()
2515 res->error & ATA_IDNF ? "IDNF " : "", in ata_eh_link_report()
2516 res->error & ATA_ABORTED ? "ABRT " : ""); in ata_eh_link_report()
2522 * ata_eh_report - report error handling to user
2545 ata_for_each_dev(dev, link, ALL) in ata_do_reset()
2546 classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_do_reset()
2553 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) in ata_eh_followup_srst_needed()
2555 if (rc == -EAGAIN) in ata_eh_followup_srst_needed()
2557 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) in ata_eh_followup_srst_needed()
2566 struct ata_port *ap = link->ap; in ata_eh_reset()
2567 struct ata_link *slave = ap->slave_link; in ata_eh_reset()
2568 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_reset()
2569 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; in ata_eh_reset()
2570 unsigned int *classes = ehc->classes; in ata_eh_reset()
2571 unsigned int lflags = link->flags; in ata_eh_reset()
2572 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); in ata_eh_reset()
2587 if (link->flags & ATA_LFLAG_RST_ONCE) in ata_eh_reset()
2589 if (link->flags & ATA_LFLAG_NO_HRST) in ata_eh_reset()
2591 if (link->flags & ATA_LFLAG_NO_SRST) in ata_eh_reset()
2595 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_reset()
2597 WARN_ON(time_after(ehc->last_reset, now)); in ata_eh_reset()
2598 deadline = ata_deadline(ehc->last_reset, in ata_eh_reset()
2601 schedule_timeout_uninterruptible(deadline - now); in ata_eh_reset()
2604 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
2605 ap->pflags |= ATA_PFLAG_RESETTING; in ata_eh_reset()
2606 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
2610 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2618 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
2619 dev->dma_mode = 0xff; in ata_eh_reset()
2626 if (ap->ops->set_piomode) in ata_eh_reset()
2627 ap->ops->set_piomode(ap, dev); in ata_eh_reset()
2632 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2635 ehc->i.action |= ATA_EH_HARDRESET; in ata_eh_reset()
2638 ehc->i.action |= ATA_EH_SOFTRESET; in ata_eh_reset()
2646 sehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2647 sehc->i.action |= ehc->i.action; in ata_eh_reset()
2654 * -ENOENT or clear ATA_EH_RESET. in ata_eh_reset()
2656 if (slave && (rc == 0 || rc == -ENOENT)) { in ata_eh_reset()
2660 if (tmp != -ENOENT) in ata_eh_reset()
2663 ehc->i.action |= sehc->i.action; in ata_eh_reset()
2667 if (rc == -ENOENT) { in ata_eh_reset()
2668 ata_link_dbg(link, "port disabled--ignoring\n"); in ata_eh_reset()
2669 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2671 ata_for_each_dev(dev, link, ALL) in ata_eh_reset()
2672 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2685 if (reset && !(ehc->i.action & ATA_EH_RESET)) { in ata_eh_reset()
2686 ata_for_each_dev(dev, link, ALL) in ata_eh_reset()
2687 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2695 retry: in ata_eh_reset()
2710 ehc->last_reset = jiffies; in ata_eh_reset()
2712 ehc->i.flags |= ATA_EHI_DID_HARDRESET; in ata_eh_reset()
2715 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; in ata_eh_reset()
2724 if (rc && rc != -EAGAIN) { in ata_eh_reset()
2743 case -EAGAIN: in ata_eh_reset()
2744 rc = -EAGAIN; in ata_eh_reset()
2755 /* perform follow-up SRST if necessary */ in ata_eh_reset()
2762 "follow-up softreset required but no softreset available\n"); in ata_eh_reset()
2764 rc = -EINVAL; in ata_eh_reset()
2786 * Post-reset processing in ata_eh_reset()
2788 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2793 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
2794 dev->flags &= ~ATA_DFLAG_SLEEPING; in ata_eh_reset()
2801 classes[dev->devno] = ATA_DEV_ATA; in ata_eh_reset()
2803 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; in ata_eh_reset()
2808 link->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
2810 slave->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
2833 spin_lock_irqsave(link->ap->lock, flags); in ata_eh_reset()
2834 link->eh_info.serror = 0; in ata_eh_reset()
2836 slave->eh_info.serror = 0; in ata_eh_reset()
2837 spin_unlock_irqrestore(link->ap->lock, flags); in ata_eh_reset()
2848 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2850 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
2852 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2856 if (ata_class_enabled(classes[dev->devno])) in ata_eh_reset()
2859 classes[dev->devno]); in ata_eh_reset()
2860 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2861 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
2864 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2874 rc = -EAGAIN; in ata_eh_reset()
2886 ehc->last_reset = jiffies; /* update to completion time */ in ata_eh_reset()
2887 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_reset()
2888 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ in ata_eh_reset()
2893 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
2895 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
2897 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
2898 ap->pflags &= ~ATA_PFLAG_RESETTING; in ata_eh_reset()
2899 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
2904 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ in ata_eh_reset()
2907 rc = -ERESTART; in ata_eh_reset()
2925 unsigned long delta = deadline - now; in ata_eh_reset()
2939 * They need to be reset - as well as the PMP - before retrying. in ata_eh_reset()
2941 if (rc == -ERESTART) { in ata_eh_reset()
2947 if (try == max_tries - 1) { in ata_eh_reset()
2951 } else if (rc == -EPIPE) in ata_eh_reset()
2956 goto retry; in ata_eh_reset()
2977 * Additionally, all write accesses to &ap->park_req_pending in ata_eh_pull_park_action()
2982 * *all* devices on port ap have been pulled into the in ata_eh_pull_park_action()
2984 * park_req_pending.done is non-zero by the time we reach in ata_eh_pull_park_action()
2991 spin_lock_irqsave(ap->lock, flags); in ata_eh_pull_park_action()
2992 reinit_completion(&ap->park_req_pending); in ata_eh_pull_park_action()
2994 ata_for_each_dev(dev, link, ALL) { in ata_eh_pull_park_action()
2995 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_pull_park_action()
2997 link->eh_context.i.dev_action[dev->devno] |= in ata_eh_pull_park_action()
2998 ehi->dev_action[dev->devno] & ATA_EH_PARK; in ata_eh_pull_park_action()
3002 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_pull_park_action()
3007 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_park_issue_cmd()
3013 ehc->unloaded_mask |= 1 << dev->devno; in ata_eh_park_issue_cmd()
3020 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3029 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3036 struct ata_port *ap = link->ap; in ata_eh_revalidate_and_attach()
3037 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_revalidate_and_attach()
3044 * be done backwards such that PDIAG- is released by the slave in ata_eh_revalidate_and_attach()
3051 if (ehc->i.flags & ATA_EHI_DID_RESET) in ata_eh_revalidate_and_attach()
3055 WARN_ON(dev->class == ATA_DEV_PMP); in ata_eh_revalidate_and_attach()
3065 * to ap->target_lpm_policy after revalidation is done. in ata_eh_revalidate_and_attach()
3067 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_revalidate_and_attach()
3075 rc = -EIO; in ata_eh_revalidate_and_attach()
3080 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], in ata_eh_revalidate_and_attach()
3090 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3093 schedule_delayed_work(&ap->scsi_rescan_task, 0); in ata_eh_revalidate_and_attach()
3094 } else if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_revalidate_and_attach()
3095 ehc->tries[dev->devno] && in ata_eh_revalidate_and_attach()
3096 ata_class_enabled(ehc->classes[dev->devno])) { in ata_eh_revalidate_and_attach()
3097 /* Temporarily set dev->class, it will be in ata_eh_revalidate_and_attach()
3098 * permanently set once all configurations are in ata_eh_revalidate_and_attach()
3103 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3105 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3108 rc = ata_dev_read_id(dev, &dev->class, in ata_eh_revalidate_and_attach()
3109 readid_flags, dev->id); in ata_eh_revalidate_and_attach()
3112 ehc->classes[dev->devno] = dev->class; in ata_eh_revalidate_and_attach()
3113 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3118 ata_ering_clear(&dev->ering); in ata_eh_revalidate_and_attach()
3119 new_mask |= 1 << dev->devno; in ata_eh_revalidate_and_attach()
3121 case -ENOENT: in ata_eh_revalidate_and_attach()
3122 /* IDENTIFY was issued to non-existent in ata_eh_revalidate_and_attach()
3134 /* PDIAG- should have been released, ask cable type if post-reset */ in ata_eh_revalidate_and_attach()
3135 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { in ata_eh_revalidate_and_attach()
3136 if (ap->ops->cable_detect) in ata_eh_revalidate_and_attach()
3137 ap->cbl = ap->ops->cable_detect(ap); in ata_eh_revalidate_and_attach()
3144 ata_for_each_dev(dev, link, ALL) { in ata_eh_revalidate_and_attach()
3145 if (!(new_mask & (1 << dev->devno))) in ata_eh_revalidate_and_attach()
3148 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3150 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3153 ehc->i.flags |= ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3155 ehc->i.flags &= ~ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3157 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3161 spin_lock_irqsave(ap->lock, flags); in ata_eh_revalidate_and_attach()
3162 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_revalidate_and_attach()
3163 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_revalidate_and_attach()
3166 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3177 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3193 struct ata_port *ap = link->ap; in ata_set_mode()
3199 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { in ata_set_mode()
3202 ent = ata_ering_top(&dev->ering); in ata_set_mode()
3204 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; in ata_set_mode()
3209 if (ap->ops->set_mode) in ata_set_mode()
3210 rc = ap->ops->set_mode(link, r_failed_dev); in ata_set_mode()
3216 struct ata_eh_context *ehc = &link->eh_context; in ata_set_mode()
3217 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; in ata_set_mode()
3218 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); in ata_set_mode()
3220 if (dev->xfer_mode != saved_xfer_mode || in ata_set_mode()
3222 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; in ata_set_mode()
3229 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3240 * 0 on success, -errno on failure.
3247 u8 *sense_buffer = dev->link->ap->sector_buf; in atapi_eh_clear_ua()
3256 return -EIO; in atapi_eh_clear_ua()
3266 return -EIO; in atapi_eh_clear_ua()
3277 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3278 * @dev: ATA device which may need FLUSH retry
3287 * This function determines whether FLUSH failure retry is
3291 * 0 if EH can continue, -errno if EH needs to be repeated.
3295 struct ata_link *link = dev->link; in ata_eh_maybe_retry_flush()
3296 struct ata_port *ap = link->ap; in ata_eh_maybe_retry_flush()
3303 if (!ata_tag_valid(link->active_tag)) in ata_eh_maybe_retry_flush()
3306 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_maybe_retry_flush()
3307 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && in ata_eh_maybe_retry_flush()
3308 qc->tf.command != ATA_CMD_FLUSH)) in ata_eh_maybe_retry_flush()
3312 if (qc->err_mask & AC_ERR_DEV) in ata_eh_maybe_retry_flush()
3318 tf.command = qc->tf.command; in ata_eh_maybe_retry_flush()
3323 tf.command, qc->err_mask); in ata_eh_maybe_retry_flush()
3330 * Making sure retry is allowed at least once and in ata_eh_maybe_retry_flush()
3331 * retrying it should do the trick - whatever was in in ata_eh_maybe_retry_flush()
3335 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); in ata_eh_maybe_retry_flush()
3339 rc = -EIO; in ata_eh_maybe_retry_flush()
3343 qc->err_mask |= AC_ERR_DEV; in ata_eh_maybe_retry_flush()
3344 qc->result_tf = tf; in ata_eh_maybe_retry_flush()
3353 * ata_eh_set_lpm - configure SATA interface power management
3367 * 0 on success, -errno on failure.
3372 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; in ata_eh_set_lpm()
3373 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_set_lpm()
3375 enum ata_lpm_policy old_policy = link->lpm_policy; in ata_eh_set_lpm()
3376 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; in ata_eh_set_lpm()
3383 (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) in ata_eh_set_lpm()
3393 bool hipm = ata_id_has_hipm(dev->id); in ata_eh_set_lpm()
3394 bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; in ata_eh_set_lpm()
3415 rc = -EIO; in ata_eh_set_lpm()
3422 rc = ap->ops->set_lpm(link, policy, hints); in ata_eh_set_lpm()
3423 if (!rc && ap->slave_link) in ata_eh_set_lpm()
3424 rc = ap->ops->set_lpm(ap->slave_link, policy, hints); in ata_eh_set_lpm()
3433 if (rc == -EOPNOTSUPP) { in ata_eh_set_lpm()
3434 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_set_lpm()
3445 link->lpm_policy = policy; in ata_eh_set_lpm()
3446 if (ap && ap->slave_link) in ata_eh_set_lpm()
3447 ap->slave_link->lpm_policy = policy; in ata_eh_set_lpm()
3452 ata_id_has_dipm(dev->id)) { in ata_eh_set_lpm()
3459 rc = -EIO; in ata_eh_set_lpm()
3465 link->last_lpm_change = jiffies; in ata_eh_set_lpm()
3466 link->flags |= ATA_LFLAG_CHANGED; in ata_eh_set_lpm()
3472 link->lpm_policy = old_policy; in ata_eh_set_lpm()
3473 if (ap && ap->slave_link) in ata_eh_set_lpm()
3474 ap->slave_link->lpm_policy = old_policy; in ata_eh_set_lpm()
3477 if (!dev || ehc->tries[dev->devno] <= 2) { in ata_eh_set_lpm()
3479 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_set_lpm()
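
The failure handling visible here follows a common shape: try the new policy, fall back to the old one if it does not take, and permanently opt out (ATA_LFLAG_NO_LPM) once the device clearly will not cooperate. A generic sketch of that pattern; demo_apply_policy() and the simple try budget are placeholders, not the real ops->set_lpm() semantics:

#include <stdio.h>

#define DEMO_LFLAG_NO_LPM (1 << 0)

struct demo_link {
	int lpm_policy;
	unsigned int flags;
	int tries_left;
};

/* Placeholder for ap->ops->set_lpm(); pretend the device rejects everything. */
static int demo_apply_policy(struct demo_link *link, int policy)
{
	(void)link; (void)policy;
	return -1;
}

static int demo_set_lpm(struct demo_link *link, int policy)
{
	int old_policy = link->lpm_policy;

	if (link->flags & DEMO_LFLAG_NO_LPM)
		return 0;                        /* LPM disabled for this link */

	if (demo_apply_policy(link, policy)) {
		link->lpm_policy = old_policy;   /* keep the known-good setting */
		if (--link->tries_left <= 0)
			link->flags |= DEMO_LFLAG_NO_LPM;   /* give up on LPM */
		return -1;
	}

	link->lpm_policy = policy;
	return 0;
}

int main(void)
{
	struct demo_link link = { .lpm_policy = 0, .tries_left = 2 };

	while (demo_set_lpm(&link, 3) && !(link.flags & DEMO_LFLAG_NO_LPM))
		;
	printf("NO_LPM set: %s\n",
	       link.flags & DEMO_LFLAG_NO_LPM ? "yes" : "no");
	return 0;
}
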
3501 ata_for_each_dev(dev, link, ALL) in ata_link_nr_vacant()
3502 if (dev->class == ATA_DEV_UNKNOWN) in ata_link_nr_vacant()
3509 struct ata_port *ap = link->ap; in ata_eh_skip_recovery()
3510 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_skip_recovery()
3514 if (link->flags & ATA_LFLAG_DISABLED) in ata_eh_skip_recovery()
3518 if (ehc->i.flags & ATA_EHI_NO_RECOVERY) in ata_eh_skip_recovery()
3526 if ((ehc->i.action & ATA_EH_RESET) && in ata_eh_skip_recovery()
3527 !(ehc->i.flags & ATA_EHI_DID_RESET)) in ata_eh_skip_recovery()
3530 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ in ata_eh_skip_recovery()
3531 ata_for_each_dev(dev, link, ALL) { in ata_eh_skip_recovery()
3532 if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_skip_recovery()
3533 ehc->classes[dev->devno] != ATA_DEV_NONE) in ata_eh_skip_recovery()
3546 if ((ent->eflags & ATA_EFLAG_OLD_ER) || in ata_count_probe_trials_cb()
3547 (ent->timestamp < now - min(now, interval))) in ata_count_probe_trials_cb()
3548 return -1; in ata_count_probe_trials_cb()
3556 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_schedule_probe()
3560 if (!(ehc->i.probe_mask & (1 << dev->devno)) || in ata_eh_schedule_probe()
3561 (ehc->did_probe_mask & (1 << dev->devno))) in ata_eh_schedule_probe()
3566 ehc->did_probe_mask |= (1 << dev->devno); in ata_eh_schedule_probe()
3567 ehc->i.action |= ATA_EH_RESET; in ata_eh_schedule_probe()
3568 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_schedule_probe()
3569 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_schedule_probe()
3572 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_schedule_probe()
3574 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, in ata_eh_schedule_probe()
3594 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); in ata_eh_schedule_probe()
3595 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); in ata_eh_schedule_probe()
3605 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_handle_dev_fail()
3607 /* -EAGAIN from EH routine indicates retry without prejudice. in ata_eh_handle_dev_fail()
3610 if (err != -EAGAIN) in ata_eh_handle_dev_fail()
3611 ehc->tries[dev->devno]--; in ata_eh_handle_dev_fail()
3614 case -ENODEV: in ata_eh_handle_dev_fail()
3616 ehc->i.probe_mask |= (1 << dev->devno); in ata_eh_handle_dev_fail()
3618 case -EINVAL: in ata_eh_handle_dev_fail()
3620 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); in ata_eh_handle_dev_fail()
3622 case -EIO: in ata_eh_handle_dev_fail()
3623 if (ehc->tries[dev->devno] == 1) { in ata_eh_handle_dev_fail()
3628 if (dev->pio_mode > XFER_PIO_0) in ata_eh_handle_dev_fail()
3633 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { in ata_eh_handle_dev_fail()
3634 /* disable device if it has used up all its chances */ in ata_eh_handle_dev_fail()
3643 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_handle_dev_fail()
3644 memset(ehc->cmd_timeout_idx[dev->devno], 0, in ata_eh_handle_dev_fail()
3645 sizeof(ehc->cmd_timeout_idx[dev->devno])); in ata_eh_handle_dev_fail()
3650 ehc->i.action |= ATA_EH_RESET; in ata_eh_handle_dev_fail()
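
These fragments add up to an errno-driven adjustment of the per-device retry budget: -EAGAIN is free, -ENODEV requests a reprobe, -EINVAL leaves at most one more attempt, and a device with no tries left is disabled. A compressed, approximate sketch of that policy; the constants and the disable step are stand-ins:

#include <errno.h>
#include <stdio.h>

#define DEMO_EH_DEV_TRIES 3   /* stand-in for ATA_EH_DEV_TRIES */

struct demo_dev {
	int tries;
	int probe_requested;
	int disabled;
};

static void demo_handle_dev_fail(struct demo_dev *dev, int err)
{
	if (err != -EAGAIN)            /* -EAGAIN: retry without prejudice */
		dev->tries--;

	switch (err) {
	case -ENODEV:                  /* device missing or wrong: reprobe */
		dev->probe_requested = 1;
		break;
	case -EINVAL:                  /* unsupported: one last attempt at most */
		if (dev->tries > 1)
			dev->tries = 1;
		break;
	}

	if (!dev->disabled && dev->tries <= 0)
		dev->disabled = 1;     /* used up all its chances */
}

int main(void)
{
	struct demo_dev dev = { .tries = DEMO_EH_DEV_TRIES };

	demo_handle_dev_fail(&dev, -EAGAIN);   /* no charge */
	demo_handle_dev_fail(&dev, -EINVAL);   /* tries capped at 1 */
	demo_handle_dev_fail(&dev, -EIO);      /* last chance gone */
	printf("tries=%d disabled=%d\n", dev.tries, dev.disabled);   /* 0, 1 */
	return 0;
}
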
3656 * ata_eh_recover - recover host port after error
3667 * link's eh_context. This function executes all the operations
3675 * 0 on success, -errno on failure.
3689 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3691 /* re-enable link? */ in ata_eh_recover()
3692 if (ehc->i.action & ATA_EH_ENABLE_LINK) { in ata_eh_recover()
3694 spin_lock_irqsave(ap->lock, flags); in ata_eh_recover()
3695 link->flags &= ~ATA_LFLAG_DISABLED; in ata_eh_recover()
3696 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_recover()
3700 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3701 if (link->flags & ATA_LFLAG_NO_RETRY) in ata_eh_recover()
3702 ehc->tries[dev->devno] = 1; in ata_eh_recover()
3704 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_recover()
3707 ehc->i.action |= ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3709 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; in ata_eh_recover()
3712 if (dev->flags & ATA_DFLAG_DETACH) in ata_eh_recover()
3721 retry: in ata_eh_recover()
3725 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_eh_recover()
3730 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3734 ehc->i.action = 0; in ata_eh_recover()
3736 ata_for_each_dev(dev, link, ALL) in ata_eh_recover()
3737 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_eh_recover()
3742 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3744 if (!(ehc->i.action & ATA_EH_RESET)) in ata_eh_recover()
3760 * ap->park_req_pending in ata_eh_recover()
3766 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3767 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3770 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3771 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3773 if (!(ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3776 tmp = dev->unpark_deadline; in ata_eh_recover()
3781 if (ehc->unloaded_mask & (1 << dev->devno)) in ata_eh_recover()
3793 deadline = wait_for_completion_timeout(&ap->park_req_pending, in ata_eh_recover()
3794 deadline - now); in ata_eh_recover()
3798 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3799 if (!(link->eh_context.unloaded_mask & in ata_eh_recover()
3800 (1 << dev->devno))) in ata_eh_recover()
3811 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3822 if (link->device->class == ATA_DEV_PMP) { in ata_eh_recover()
3823 ehc->i.action = 0; in ata_eh_recover()
3828 if (ehc->i.flags & ATA_EHI_SETMODE) { in ata_eh_recover()
3832 ehc->i.flags &= ~ATA_EHI_SETMODE; in ata_eh_recover()
3838 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_recover()
3839 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3840 if (dev->class != ATA_DEV_ATAPI) in ata_eh_recover()
3855 if (ehc->i.dev_action[dev->devno] & ATA_EH_SET_ACTIVE) { in ata_eh_recover()
3861 /* retry flush if necessary */ in ata_eh_recover()
3862 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3863 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3864 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3873 if (link->lpm_policy != ap->target_lpm_policy) { in ata_eh_recover()
3874 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); in ata_eh_recover()
3880 ehc->i.flags = 0; in ata_eh_recover()
3890 * Can't retry if it's frozen. in ata_eh_recover()
3899 goto retry; in ata_eh_recover()
3909 * ata_eh_finish - finish up EH
3912 * Recovery is complete. Clean up EH states and retry or finish
3923 /* retry or finish qcs */ in ata_eh_finish()
3925 if (!(qc->flags & ATA_QCFLAG_EH)) in ata_eh_finish()
3928 if (qc->err_mask) { in ata_eh_finish()
3933 if (qc->flags & ATA_QCFLAG_RETRY) { in ata_eh_finish()
3935 * Since qc->err_mask is set, ata_eh_qc_retry() in ata_eh_finish()
3936 * will not increment scmd->allowed, so upper in ata_eh_finish()
3937 * layer will only retry the command if it has in ata_eh_finish()
3945 if (qc->flags & ATA_QCFLAG_SENSE_VALID || in ata_eh_finish()
3946 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) { in ata_eh_finish()
3950 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); in ata_eh_finish()
3952 * Since qc->err_mask is not set, in ata_eh_finish()
3954 * scmd->allowed, so upper layer is guaranteed in ata_eh_finish()
3955 * to retry the command. in ata_eh_finish()
3963 WARN_ON(ap->nr_active_links); in ata_eh_finish()
3964 ap->nr_active_links = 0; in ata_eh_finish()
3968 * ata_do_eh - do standard error handling
3994 ata_for_each_dev(dev, &ap->link, ALL) in ata_do_eh()
4002 * ata_std_error_handler - standard error handler
4012 struct ata_port_operations *ops = ap->ops; in ata_std_error_handler()
4013 ata_reset_fn_t hardreset = ops->hardreset; in ata_std_error_handler()
4015 /* ignore built-in hardreset if SCR access is not available */ in ata_std_error_handler()
4016 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) in ata_std_error_handler()
4019 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); in ata_std_error_handler()
4025 * ata_eh_handle_port_suspend - perform port suspend operation
4041 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4042 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_suspend()
4043 ap->pm_mesg.event & PM_EVENT_RESUME) { in ata_eh_handle_port_suspend()
4044 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4047 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4049 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_suspend()
4051 /* Set all devices attached to the port in standby mode */ in ata_eh_handle_port_suspend()
4062 if (PMSG_IS_AUTO(ap->pm_mesg)) { in ata_eh_handle_port_suspend()
4063 ata_for_each_dev(dev, &ap->link, ENABLED) { in ata_eh_handle_port_suspend()
4072 if (ap->ops->port_suspend) in ata_eh_handle_port_suspend()
4073 rc = ap->ops->port_suspend(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4075 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4078 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4080 ap->pflags &= ~ATA_PFLAG_PM_PENDING; in ata_eh_handle_port_suspend()
4082 ap->pflags |= ATA_PFLAG_SUSPENDED; in ata_eh_handle_port_suspend()
4086 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4092 * ata_eh_handle_port_resume - perform port resume operation
4107 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4108 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_resume()
4109 !(ap->pm_mesg.event & PM_EVENT_RESUME)) { in ata_eh_handle_port_resume()
4110 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4113 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4115 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); in ata_eh_handle_port_resume()
4125 ata_for_each_dev(dev, link, ALL) in ata_eh_handle_port_resume()
4126 ata_ering_clear(&dev->ering); in ata_eh_handle_port_resume()
4128 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_resume()
4130 if (ap->ops->port_resume) in ata_eh_handle_port_resume()
4131 ap->ops->port_resume(ap); in ata_eh_handle_port_resume()
4137 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4138 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_resume()
4139 ap->pflags |= ATA_PFLAG_RESUMING; in ata_eh_handle_port_resume()
4140 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()