/* drivers/ide/ide-io.c -- excerpts */
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
/* in ide_end_rq(): */
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (rq == drive->sense_rq) {
		drive->sense_rq = NULL;
		drive->sense_rq_active = false;
	}
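
/*
 * A hedged sketch of the producer side (modeled on the DMA timeout path
 * in ide-dma.c; the helper name is illustrative, not from this file):
 * a DMA failure drops back to PIO and bumps the counter that
 * ide_end_rq() checks above against the magic 3.
 */
static void example_dma_pio_fallback(ide_drive_t *drive)
{
	ide_dma_off_quietly(drive);		/* disable DMA without logging */
	drive->retry_pio++;			/* counted against the magic 3 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
}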
/* in ide_complete_cmd(): */
	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;
	u8 tf_cmd = tf->command;

	tf->error = err;
	tf->status = stat;

	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
		u8 data[2];

		tp_ops->input_data(drive, cmd, data, 2);

		cmd->tf.data = data[0];
		cmd->hob.data = data[1];
	}

	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, cmd);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
	}

	if (rq && ata_taskfile_request(rq)) {
		struct ide_cmd *orig_cmd = ide_req(rq)->special;

		if (cmd->tf_flags & IDE_TFLAG_DYN)
			kfree(orig_cmd);
	}
/* in ide_complete_rq(): */
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */

	hwif->rq = NULL;
/* in ide_kill_rq(): */
	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		scsi_req(rq)->result = 0;
	} else {
		if (media == ide_tape)
			scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
		else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
	}
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}
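
/*
 * Worked example (values are illustrative, not from the source): for a
 * legacy CHS drive with sect=63, cyl=16383 (0x3fff), head=16, the
 * taskfile built above carries nsect=63, lbal=63, lbam=0xff, lbah=0x3f
 * and device=(16-1)|select, i.e. ATA_CMD_INIT_DEV_PARAMS programs
 * 16 heads and 63 sectors per track.
 */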
static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}
static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}
 *	do_special		-	issue some special commands
 *
 *	do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *	ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
/* in do_special(): */
	printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
		drive->special_flags);

	if (drive->media != ide_disk) {
		drive->special_flags = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
		drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
		drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
		drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else
		BUG();

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
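
/*
 * A minimal usage sketch (the function name is hypothetical; the flag is
 * real): callers never invoke do_special() directly -- they set one of
 * the IDE_SFLAG_* bits and the next pass through the I/O path issues the
 * corresponding command.
 */
static void example_schedule_recalibrate(ide_drive_t *drive)
{
	drive->special_flags |= IDE_SFLAG_RECALIBRATE;	/* consumed above */
}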
/* in ide_map_sg(): */
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
	struct request *rq = cmd->rq;

	cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
	if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
		last_sg->length +=
			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
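
/*
 * Worked example (illustrative numbers): with dma_pad_mask == 3 (pad to
 * 4 bytes) and blk_rq_bytes(rq) == 510, the test 510 & 3 == 2 is
 * nonzero, and (3 & ~510) + 1 == 2, so the last scatterlist segment
 * grows from 510 to 512 bytes -- the next padding boundary.
 */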
/* in ide_init_sg_cmd(): */
	cmd->nbytes = cmd->nleft = nr_bytes;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
 *	execute_drive_cmd	-	issue special drive command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
/* in execute_drive_cmd(): */
	struct ide_cmd *cmd = ide_req(rq)->special;

	if (cmd->protocol == ATA_PROT_PIO) {
		ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
		ide_map_sg(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
	printk("%s: DRIVE_CMD (null)\n", drive->name);
	scsi_req(rq)->result = 0;
/* in ide_special_rq(): */
	u8 cmd = scsi_req(rq)->cmd[0];
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	returns ide_stopped if the request has been handled.
 *
 *	FIXME: this function needs a rename
/* in start_request(): */
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);

	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->rq_flags |= RQF_FAILED;
		goto kill_rq;
	}

	if (drive->prep_rq && !drive->prep_rq(drive, rq))
		return ide_stopped;

	drive->hwif->tp_ops->dev_select(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}

	if (drive->special_flags == 0) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (ata_pm_request(rq)) {
			struct ide_pm_state *pm = ide_req(rq)->special;

			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);

			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && ata_misc_request(rq))
			/*
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, blk_rq_pos(rq));
	}
 *	ide_stall_queue		-	pause an IDE device
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */

/* in ide_stall_queue(): */
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
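
/*
 * Usage sketch (the half-second value is an arbitrary example): a media
 * driver that wants to back off briefly would call
 *
 *	ide_stall_queue(drive, HZ / 2);
 *
 * after which the issue path skips this device until drive->sleep passes.
 */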
/* in ide_lock_port(): */
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

/* in ide_unlock_port(): */
	hwif->busy = 0;
/* in ide_lock_host(): */
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}

/* in ide_unlock_host(): */
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
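
/*
 * A hedged sketch of the intended pairing (simplified: the real issue
 * path below takes hwif->lock around the port lock exactly like this,
 * but with request handling in between; the function name is mine):
 */
static void example_locked_issue(struct ide_host *host, ide_hwif_t *hwif)
{
	if (ide_lock_host(host, hwif))
		return;			/* serialized host is busy, retry later */

	spin_lock_irq(&hwif->lock);
	if (ide_lock_port(hwif) == 0) {
		/* ... start a request here ... */
		ide_unlock_port(hwif);
	}
	spin_unlock_irq(&hwif->lock);

	ide_unlock_host(host);
}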
/* in ide_requeue_and_plug(): */
	struct request_queue *q = drive->queue;

	blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
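
/*
 * Note: blk_mq_delay_run_hw_queue() takes the delay in milliseconds, so
 * the hardware queue is re-run 3 ms after the requeue above.
 */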
/* in ide_issue_rq(): */
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;

	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
		rq->rq_flags |= RQF_DONTPREP;
		ide_req(rq)->special = NULL;
	}

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		WARN_ON_ONCE(hwif->rq);

		prev_port = hwif->host->cur_port;
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed.
		 * We let requests forced at head of queue with ide-preempt
		 * always be allowed, unless the subdriver triggers such a
		 * thing in its own PM paths.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    ata_pm_request(rq) == 0 &&
		    (rq->rq_flags & RQF_PREEMPT) == 0) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		scsi_req(rq)->resid_len = blk_rq_bytes(rq);
		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
		}
	}

plug_device:
	if (local_requeue)
		list_add(&rq->queuelist, &drive->rq_list);
	spin_unlock_irq(&hwif->lock);

out:
	spin_unlock_irq(&hwif->lock);
/*
 * Issue a new request to a device.
 */
static blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	ide_drive_t *drive = hctx->queue->queuedata;
	ide_hwif_t *hwif = drive->hwif;

	spin_lock_irq(&hwif->lock);
	if (drive->sense_rq_active) {
		spin_unlock_irq(&hwif->lock);
		return BLK_STS_DEV_RESOURCE;
	}
	spin_unlock_irq(&hwif->lock);

	blk_mq_start_request(bd->rq);
	return ide_issue_rq(drive, bd->rq, false);
}
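
/*
 * A hedged sketch of how ide_queue_rq is wired up (the actual ops table
 * lives in the probe code; this only illustrates the blk-mq registration
 * shape, and the variable name is mine):
 */
static const struct blk_mq_ops example_ide_mq_ops = {
	.queue_rq	= ide_queue_rq,
};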
/* in drive_is_ready(): */
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);
 *	ide_timer_expiry	-	handle lack of an IDE interrupt

/* in ide_timer_expiry(): */
	int		wait = -1;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		hwif->expiry = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		disable_irq(hwif->irq);

		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->port_ops && hwif->port_ops->clear_irq)
				hwif->port_ops->clear_irq(drive);

			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}

		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped && hwif->polling == 0) {
			rq_in_flight = hwif->rq;
			hwif->rq = NULL;
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}
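
/*
 * A hedged sketch of how a handler is armed against this watchdog
 * (modeled on the timer fields used above; the function name is mine):
 * hwif->req_gen_timer snapshots hwif->req_gen, so a completion that
 * bumps req_gen invalidates the pending timeout.
 */
static void example_arm_handler(ide_hwif_t *hwif, ide_handler_t *handler,
				unsigned int timeout)
{
	hwif->handler = handler;
	hwif->timer.expires = jiffies + timeout;
	hwif->req_gen_timer = hwif->req_gen;
	mod_timer(&hwif->timer, hwif->timer.expires);
}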
 *	unexpected_intr		-	handle an unexpected IDE interrupt

 *	we could screw up by interfering with a new request being set up for

 *	In reality, this is a non-issue. The new command is not sent unless

 *	be accidentally invoked as a result of any valid command completion

/* in unexpected_intr(): */
	u8 stat = hwif->tp_ops->read_status(hwif);

	printk(KERN_ERR "%s: unexpected interrupt, status=0x%02x, count=%ld\n",
		hwif->name, stat, count);
 *	ide_intr	-	default IDE interrupt handler
 *
 *	hwif is the interface in the group currently performing
 *	a command. hwif->cur_dev is the drive and hwif->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever get called.
/* in ide_intr(): */
	struct ide_host *host = hwif->host;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->port_ops && hwif->port_ops->test_irq &&
	    hwif->port_ops->test_irq(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us,
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know,
		 * or	(3) a spurious interrupt of unknown origin.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * the other device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->expiry = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);

	if (startstop == ide_stopped && hwif->polling == 0) {
		BUG_ON(hwif->handler);
		rq_in_flight = hwif->rq;
		hwif->rq = NULL;
		ide_unlock_port(hwif);
		plug_device = 1;
	}

out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device)
		ide_unlock_host(hwif->host);
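
/*
 * A hedged sketch of the registration (the real call sits in the port
 * probe code and uses the host's IRQ flags and port name; only the shape
 * is shown): ide_intr() matches the irq_handler_t signature, with the
 * hwif passed back as dev_id.
 */
static int example_register_port_irq(ide_hwif_t *hwif)
{
	return request_irq(hwif->irq, ide_intr, IRQF_SHARED, "ide", hwif);
}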
/* in ide_pad_transfer(): */
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
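
/*
 * Worked example (illustrative): called with len == 6, the loop above
 * issues one 4-byte and one 2-byte dummy transfer of zeroes, draining
 * the remainder of a transfer the device still expects.
 */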
/* in ide_insert_request_head(): */
	drive->sense_rq_active = true;
	list_add_tail(&rq->queuelist, &drive->rq_list);
	kblockd_schedule_work(&drive->rq_work);
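
/*
 * A hedged sketch of the consumer side (the real work handler lives in
 * the probe code and also quiesces the queue; this only shows the shape,
 * and the function name is mine): the scheduled work drains
 * drive->rq_list and re-issues each request with local_requeue set.
 */
static void example_rq_work(struct work_struct *work)
{
	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
	ide_hwif_t *hwif = drive->hwif;
	LIST_HEAD(list);

	spin_lock_irq(&hwif->lock);
	list_splice_init(&drive->rq_list, &list);
	spin_unlock_irq(&hwif->lock);

	while (!list_empty(&list)) {
		struct request *rq = list_first_entry(&list, struct request,
						      queuelist);
		list_del_init(&rq->queuelist);
		ide_issue_rq(drive, rq, true);
	}
}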