Lines matching full-text search terms: pci, host, cam, generic (matches in the sym53c8xx glue code for NCR, Symbios and LSI 8xx/1010 PCI SCSI adapters)
1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * of PCI-SCSI IO processors.
6 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
7 * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx>
10 * Copyright (C) 1998-2000 Gerard Roudier
13 * a port of the FreeBSD ncr driver to Linux-1.2.13.
17 * Stefan Esser <se@mi.Uni-Koeln.de>
25 *-----------------------------------------------------------------------------
68 MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
79 MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");
121 #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
122 #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
125 * Complete a pending CAM CCB.
132 if (ucmd->eh_done) in sym_xpt_done()
133 complete(ucmd->eh_done); in sym_xpt_done()
136 cmd->scsi_done(cmd); in sym_xpt_done()
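The fragment above (lines 125-136) completes a command back to the SCSI midlayer: if an error-handler thread parked a completion on this command it is signalled first, then the normal done callback runs. A minimal sketch of that pattern, assuming a per-command area with an optional eh_done pointer as in the fragment; all my_* names are hypothetical:

#include <linux/completion.h>
#include <scsi/scsi_cmnd.h>

/* Per-command private area, analogous to struct sym_ucmd above. */
struct my_ucmd {
	struct completion *eh_done;	/* set only while an EH thread waits */
};

static void my_xpt_done(struct scsi_cmnd *cmd, struct my_ucmd *ucmd)
{
	if (ucmd->eh_done)		/* wake the waiting error handler */
		complete(ucmd->eh_done);

	cmd->scsi_done(cmd);		/* old-style done callback, as in the
					 * fragment; newer trees call scsi_done(cmd) */
}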
145 np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ; in sym_xpt_async_bus_reset()
146 np->s.settle_time_valid = 1; in sym_xpt_async_bus_reset()
153 * Choose the more appropriate CAM status if
168 * Build CAM result for a failed or auto-sensed IO.
172 struct scsi_cmnd *cmd = cp->cmd; in sym_set_cam_result_error()
177 scsi_status = cp->ssss_status; in sym_set_cam_result_error()
179 if (cp->host_flags & HF_SENSE) { in sym_set_cam_result_error()
180 scsi_status = cp->sv_scsi_status; in sym_set_cam_result_error()
181 resid = cp->sv_resid; in sym_set_cam_result_error()
182 if (sym_verbose && cp->sv_xerr_status) in sym_set_cam_result_error()
183 sym_print_xerr(cmd, cp->sv_xerr_status); in sym_set_cam_result_error()
184 if (cp->host_status == HS_COMPLETE && in sym_set_cam_result_error()
185 cp->ssss_status == S_GOOD && in sym_set_cam_result_error()
186 cp->xerr_status == 0) { in sym_set_cam_result_error()
188 cp->sv_xerr_status); in sym_set_cam_result_error()
193 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in sym_set_cam_result_error()
194 memcpy(cmd->sense_buffer, cp->sns_bbuf, in sym_set_cam_result_error()
204 p = (u_char *) cmd->sense_data; in sym_set_cam_result_error()
207 cp->target,cp->lun, -1); in sym_set_cam_result_error()
217 sym_reset_scsi_target(np, cmd->device->id); in sym_set_cam_result_error()
220 } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ in sym_set_cam_result_error()
222 else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ in sym_set_cam_result_error()
224 else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ in sym_set_cam_result_error()
229 cp->host_status, cp->ssss_status, in sym_set_cam_result_error()
230 cp->xerr_status); in sym_set_cam_result_error()
233 * Set the most appropriate value for CAM status. in sym_set_cam_result_error()
235 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); in sym_set_cam_result_error()
238 cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status; in sym_set_cam_result_error()
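Line 238 shows the legacy result packing: the driver byte lands in bits 31:24, the host (CAM-mapped) byte such as DID_ERROR in bits 23:16, and the raw SCSI status byte in bits 7:0. A purely illustrative sketch of that layout; recent kernels hide it behind accessors:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: pack driver, host and SCSI status bytes into cmd->result
 * exactly as the fragment above does. */
static void my_set_result(struct scsi_cmnd *cmd, u8 drv_status,
			  u8 host_status, u8 scsi_status)
{
	cmd->result = (drv_status << 24) | (host_status << 16) | scsi_status;
}

For an auto-sensed command the SCSI status byte stays at the value saved before the sense request was issued (sv_scsi_status in the fragment).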
246 cp->data_len = 0; in sym_scatter()
251 struct sym_tcb *tp = &np->target[cp->target]; in sym_scatter()
256 return -1; in sym_scatter()
259 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; in sym_scatter()
265 if ((len & 1) && (tp->head.wval & EWS)) { in sym_scatter()
267 cp->odd_byte_adjustment++; in sym_scatter()
271 cp->data_len += len; in sym_scatter()
274 segment = -2; in sym_scatter()
285 struct scsi_device *sdev = cmd->device; in sym_queue_command()
294 tp = &np->target[sdev->id]; in sym_queue_command()
299 lp = sym_lp(tp, sdev->lun); in sym_queue_command()
300 order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0; in sym_queue_command()
317 memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); in sym_setup_cdb()
319 cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]); in sym_setup_cdb()
320 cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); in sym_setup_cdb()
342 dir = cmd->sc_data_direction; in sym_setup_data_and_start()
344 cp->segments = sym_scatter(np, cp, cmd); in sym_setup_data_and_start()
345 if (cp->segments < 0) { in sym_setup_data_and_start()
353 if (!cp->segments) in sym_setup_data_and_start()
356 cp->data_len = 0; in sym_setup_data_and_start()
357 cp->segments = 0; in sym_setup_data_and_start()
370 lastp = goalp - 8 - (cp->segments * (2*4)); in sym_setup_data_and_start()
373 cp->host_flags |= HF_DATA_IN; in sym_setup_data_and_start()
375 lastp = goalp - 8 - (cp->segments * (2*4)); in sym_setup_data_and_start()
386 cp->phys.head.lastp = cpu_to_scr(lastp); in sym_setup_data_and_start()
387 cp->phys.head.savep = cpu_to_scr(lastp); in sym_setup_data_and_start()
388 cp->startp = cp->phys.head.savep; in sym_setup_data_and_start()
389 cp->goalp = cpu_to_scr(goalp); in sym_setup_data_and_start()
398 switch (cp->cdb_buf[0]) { in sym_setup_data_and_start()
433 np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL; in sym_timer()
434 add_timer(&np->s.timer); in sym_timer()
440 if (np->s.settle_time_valid) { in sym_timer()
441 if (time_before_eq(np->s.settle_time, thistime)) { in sym_timer()
445 np->s.settle_time_valid = 0; in sym_timer()
453 if (np->s.lasttime + 4*HZ < thistime) { in sym_timer()
454 np->s.lasttime = thistime; in sym_timer()
459 * Some way-broken PCI bridges may lead to in sym_timer()
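sym_timer (lines 433-459) re-arms itself every SYM_CONF_TIMER_INTERVAL and uses the tick both to expire the post-reset settle delay and to run housekeeping roughly every 4 seconds. Below is a minimal self-re-arming kernel timer along those lines; the structure and the interval are assumptions, not the driver's own definitions:

#include <linux/jiffies.h>
#include <linux/timer.h>

#define MY_TICK	(HZ / 2)		/* assumed interval, cf. SYM_CONF_TIMER_INTERVAL */

struct my_adapter {
	struct timer_list timer;
	unsigned long settle_time;	/* jiffies when the bus settle delay ends */
	int settle_time_valid;
};

static void my_timer_fn(struct timer_list *t)
{
	struct my_adapter *np = from_timer(np, t, timer);

	/* re-arm first so the tick keeps running */
	mod_timer(&np->timer, jiffies + MY_TICK);

	if (np->settle_time_valid &&
	    time_before_eq(np->settle_time, jiffies))
		np->settle_time_valid = 0;	/* settle delay expired */
}

static void my_timer_start(struct my_adapter *np)
{
	timer_setup(&np->timer, my_timer_fn, 0);
	mod_timer(&np->timer, jiffies + MY_TICK);
}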
472 * PCI BUS error handler.
477 struct pci_dev *pdev = sym_data->pdev; in sym_log_bus_error()
483 "PCI bus error: status = 0x%04x\n", pci_sts & 0xf900); in sym_log_bus_error()
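sym_log_bus_error reads PCI_STATUS from config space and reports only the latched error bits (the 0xf900 mask covers the parity, SERR and abort bits). A sketch using the standard config-space accessors:

#include <linux/pci.h>

/* Sketch: dump the PCI error bits latched in the status register,
 * masked as in the fragment above. */
static void my_log_bus_error(struct pci_dev *pdev)
{
	u16 pci_sts;

	pci_read_config_word(pdev, PCI_STATUS, &pci_sts);
	if (pci_sts & 0xf900)
		dev_err(&pdev->dev, "PCI bus error: status = 0x%04x\n",
			pci_sts & 0xf900);
}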
488 * queuecommand method. Entered with the host adapter lock held and
498 cmd->scsi_done = done; in sym53c8xx_queue_command_lck()
505 if (np->s.settle_time_valid && cmd->request->timeout) { in sym53c8xx_queue_command_lck()
506 unsigned long tlimit = jiffies + cmd->request->timeout; in sym53c8xx_queue_command_lck()
507 tlimit -= SYM_CONF_TIMER_INTERVAL*2; in sym53c8xx_queue_command_lck()
508 if (time_after(np->s.settle_time, tlimit)) { in sym53c8xx_queue_command_lck()
509 np->s.settle_time = tlimit; in sym53c8xx_queue_command_lck()
513 if (np->s.settle_time_valid) in sym53c8xx_queue_command_lck()
534 if (pci_channel_offline(sym_data->pdev)) in DEF_SCSI_QCMD()
539 spin_lock(shost->host_lock); in DEF_SCSI_QCMD()
541 spin_unlock(shost->host_lock); in DEF_SCSI_QCMD()
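The queuecommand path (lines 498-541) refuses new work while the PCI channel is offline and, when a post-reset settle delay is pending, clamps that delay so it ends at least two timer ticks before the command's own block-layer timeout would fire (lines 505-509). A sketch of just the clamping arithmetic; the field semantics come from the fragment, but the helper itself is hypothetical:

#include <linux/jiffies.h>

/* Sketch: never let the bus settle delay outlive the command timeout,
 * minus a two-tick safety margin. */
static void my_clamp_settle_time(unsigned long *settle_time,
				 unsigned int cmd_timeout /* jiffies */,
				 unsigned long tick)
{
	unsigned long tlimit = jiffies + cmd_timeout - 2 * tick;

	if (time_after(*settle_time, tlimit))
		*settle_time = tlimit;
}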
556 spin_lock_irqsave(np->s.host->host_lock, flags); in sym53c8xx_timer()
558 spin_unlock_irqrestore(np->s.host->host_lock, flags); in sym53c8xx_timer()
571 * Generic method for our eh processing.
577 struct Scsi_Host *shost = cmd->device->host; in sym_eh_handler()
579 struct pci_dev *pdev = sym_data->pdev; in sym_eh_handler()
580 struct sym_hcb *np = sym_data->ncb; in sym_eh_handler()
583 int sts = -1; in sym_eh_handler()
588 /* We may be in an error condition because the PCI bus in sym_eh_handler()
590 * PCI bus is reset, the card is reset, and only then in sym_eh_handler()
598 spin_lock_irq(shost->host_lock); in sym_eh_handler()
601 BUG_ON(sym_data->io_reset); in sym_eh_handler()
602 sym_data->io_reset = &eh_done; in sym_eh_handler()
606 spin_unlock_irq(shost->host_lock); in sym_eh_handler()
609 (sym_data->io_reset, in sym_eh_handler()
611 spin_lock_irq(shost->host_lock); in sym_eh_handler()
612 sym_data->io_reset = NULL; in sym_eh_handler()
613 spin_unlock_irq(shost->host_lock); in sym_eh_handler()
618 spin_lock_irq(shost->host_lock); in sym_eh_handler()
619 /* This one is queued in some place -> to wait for completion */ in sym_eh_handler()
620 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { in sym_eh_handler()
622 if (cp->cmd == cmd) { in sym_eh_handler()
629 sts = -1; in sym_eh_handler()
635 sts = sym_reset_scsi_target(np, cmd->device->id); in sym_eh_handler()
656 ucmd->eh_done = &eh_done; in sym_eh_handler()
657 spin_unlock_irq(shost->host_lock); in sym_eh_handler()
659 ucmd->eh_done = NULL; in sym_eh_handler()
660 sts = -2; in sym_eh_handler()
663 spin_unlock_irq(shost->host_lock); in sym_eh_handler()
666 dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, in sym_eh_handler()
667 sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); in sym_eh_handler()
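sym_eh_handler (lines 577-667) is the common body of all the eh_*_handler entry points: it first waits for PCI error recovery to finish if one is in flight, then checks whether the command is still queued in the driver, issues the abort or reset, and finally parks a completion in the per-command area so sym_xpt_done() (line 132) can wake it. A sketch of that last wait step under the host-lock discipline seen above; my_ucmd and the 5-second timeout mirror the fragment but are not its literal code:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_ucmd {
	struct completion *eh_done;	/* completed from the done path */
};

static int my_eh_wait_for_cmd(struct Scsi_Host *shost, struct my_ucmd *ucmd)
{
	DECLARE_COMPLETION_ONSTACK(eh_done);

	spin_lock_irq(shost->host_lock);
	ucmd->eh_done = &eh_done;		/* done path will complete this */
	spin_unlock_irq(shost->host_lock);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		ucmd->eh_done = NULL;		/* gave up; cf. sts = -2 above */
		return -ETIMEDOUT;
	}
	return 0;
}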
692 return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd); in sym53c8xx_eh_host_reset_handler()
706 oldtags = lp->s.reqtags; in sym_tune_dev_queuing()
708 if (reqtags > lp->s.scdev_depth) in sym_tune_dev_queuing()
709 reqtags = lp->s.scdev_depth; in sym_tune_dev_queuing()
711 lp->s.reqtags = reqtags; in sym_tune_dev_queuing()
714 dev_info(&tp->starget->dev, in sym_tune_dev_queuing()
716 lp->s.reqtags ? "enabled" : "disabled", reqtags); in sym_tune_dev_queuing()
722 struct sym_hcb *np = sym_get_hcb(sdev->host); in sym53c8xx_slave_alloc()
723 struct sym_tcb *tp = &np->target[sdev->id]; in sym53c8xx_slave_alloc()
728 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) in sym53c8xx_slave_alloc()
729 return -ENXIO; in sym53c8xx_slave_alloc()
731 spin_lock_irqsave(np->s.host->host_lock, flags); in sym53c8xx_slave_alloc()
742 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { in sym53c8xx_slave_alloc()
743 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; in sym53c8xx_slave_alloc()
744 starget_printk(KERN_INFO, sdev->sdev_target, in sym53c8xx_slave_alloc()
746 error = -ENXIO; in sym53c8xx_slave_alloc()
750 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { in sym53c8xx_slave_alloc()
751 if (sdev->lun != 0) { in sym53c8xx_slave_alloc()
752 error = -ENXIO; in sym53c8xx_slave_alloc()
755 starget_printk(KERN_INFO, sdev->sdev_target, in sym53c8xx_slave_alloc()
759 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); in sym53c8xx_slave_alloc()
761 error = -ENOMEM; in sym53c8xx_slave_alloc()
764 if (tp->nlcb == 1) in sym53c8xx_slave_alloc()
765 tp->starget = sdev->sdev_target; in sym53c8xx_slave_alloc()
767 spi_min_period(tp->starget) = tp->usr_period; in sym53c8xx_slave_alloc()
768 spi_max_width(tp->starget) = tp->usr_width; in sym53c8xx_slave_alloc()
772 spin_unlock_irqrestore(np->s.host->host_lock, flags); in sym53c8xx_slave_alloc()
782 struct sym_hcb *np = sym_get_hcb(sdev->host); in sym53c8xx_slave_configure()
783 struct sym_tcb *tp = &np->target[sdev->id]; in sym53c8xx_slave_configure()
784 struct sym_lcb *lp = sym_lp(tp, sdev->lun); in sym53c8xx_slave_configure()
790 lp->curr_flags = lp->user_flags; in sym53c8xx_slave_configure()
799 if (reqtags > tp->usrtags) in sym53c8xx_slave_configure()
800 reqtags = tp->usrtags; in sym53c8xx_slave_configure()
801 if (!sdev->tagged_supported) in sym53c8xx_slave_configure()
807 lp->s.scdev_depth = depth_to_use; in sym53c8xx_slave_configure()
808 sym_tune_dev_queuing(tp, sdev->lun, reqtags); in sym53c8xx_slave_configure()
810 if (!spi_initial_dv(sdev->sdev_target)) in sym53c8xx_slave_configure()
818 struct sym_hcb *np = sym_get_hcb(sdev->host); in sym53c8xx_slave_destroy()
819 struct sym_tcb *tp = &np->target[sdev->id]; in sym53c8xx_slave_destroy()
820 struct sym_lcb *lp = sym_lp(tp, sdev->lun); in sym53c8xx_slave_destroy()
827 spin_lock_irqsave(np->s.host->host_lock, flags); in sym53c8xx_slave_destroy()
829 if (lp->busy_itlq || lp->busy_itl) { in sym53c8xx_slave_destroy()
832 * so let's try to stop all on-going I/O. in sym53c8xx_slave_destroy()
834 starget_printk(KERN_WARNING, tp->starget, in sym53c8xx_slave_destroy()
835 "Removing busy LCB (%d)\n", (u8)sdev->lun); in sym53c8xx_slave_destroy()
839 if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) { in sym53c8xx_slave_destroy()
843 tp->head.sval = 0; in sym53c8xx_slave_destroy()
844 tp->head.wval = np->rv_scntl3; in sym53c8xx_slave_destroy()
845 tp->head.uval = 0; in sym53c8xx_slave_destroy()
846 tp->tgoal.check_nego = 1; in sym53c8xx_slave_destroy()
847 tp->starget = NULL; in sym53c8xx_slave_destroy()
850 spin_unlock_irqrestore(np->s.host->host_lock, flags); in sym53c8xx_slave_destroy()
856 static const char *sym53c8xx_info (struct Scsi_Host *host) in sym53c8xx_info() argument
895 switch (uc->cmd) { in sym_exec_user_command()
900 sym_debug_flags = uc->data; in sym_exec_user_command()
904 np->verbose = uc->data; in sym_exec_user_command()
913 if (!((uc->target >> t) & 1)) in sym_exec_user_command()
915 tp = &np->target[t]; in sym_exec_user_command()
916 if (!tp->nlcb) in sym_exec_user_command()
919 switch (uc->cmd) { in sym_exec_user_command()
922 if (!uc->data || uc->data >= 255) { in sym_exec_user_command()
923 tp->tgoal.iu = tp->tgoal.dt = in sym_exec_user_command()
924 tp->tgoal.qas = 0; in sym_exec_user_command()
925 tp->tgoal.offset = 0; in sym_exec_user_command()
926 } else if (uc->data <= 9 && np->minsync_dt) { in sym_exec_user_command()
927 if (uc->data < np->minsync_dt) in sym_exec_user_command()
928 uc->data = np->minsync_dt; in sym_exec_user_command()
929 tp->tgoal.iu = tp->tgoal.dt = in sym_exec_user_command()
930 tp->tgoal.qas = 1; in sym_exec_user_command()
931 tp->tgoal.width = 1; in sym_exec_user_command()
932 tp->tgoal.period = uc->data; in sym_exec_user_command()
933 tp->tgoal.offset = np->maxoffs_dt; in sym_exec_user_command()
935 if (uc->data < np->minsync) in sym_exec_user_command()
936 uc->data = np->minsync; in sym_exec_user_command()
937 tp->tgoal.iu = tp->tgoal.dt = in sym_exec_user_command()
938 tp->tgoal.qas = 0; in sym_exec_user_command()
939 tp->tgoal.period = uc->data; in sym_exec_user_command()
940 tp->tgoal.offset = np->maxoffs; in sym_exec_user_command()
942 tp->tgoal.check_nego = 1; in sym_exec_user_command()
945 tp->tgoal.width = uc->data ? 1 : 0; in sym_exec_user_command()
946 tp->tgoal.check_nego = 1; in sym_exec_user_command()
950 sym_tune_dev_queuing(tp, l, uc->data); in sym_exec_user_command()
953 tp->to_reset = 1; in sym_exec_user_command()
954 np->istat_sem = SEM; in sym_exec_user_command()
960 if (lp) lp->to_clear = 1; in sym_exec_user_command()
962 np->istat_sem = SEM; in sym_exec_user_command()
966 tp->usrflags = uc->data; in sym_exec_user_command()
978 for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--); in sym_skip_spaces()
980 return (len - cnt); in sym_skip_spaces()
988 return (end - ptr); in get_int_arg()
1003 return -EINVAL; \
1004 ptr += arg_len; len -= arg_len;
1008 return -EINVAL; \
1009 ptr += arg_len; len -= arg_len;
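The user-command parser below is built from two small helpers, sym_skip_spaces() and get_int_arg(), plus macros (lines 1003-1009) that advance ptr/len and bail out with -EINVAL when an argument is missing. A standalone sketch of the two helpers, assuming a NUL-terminated buffer as the caller provides; names are illustrative:

#include <linux/ctype.h>
#include <linux/kernel.h>

/* Sketch: count leading whitespace so the caller can skip it. */
static int my_skip_spaces(const char *ptr, int len)
{
	int cnt = len;

	while (cnt > 0 && *ptr && isspace(*ptr)) {
		ptr++;
		cnt--;
	}
	return len - cnt;
}

/* Sketch: parse a decimal integer; returns the number of characters
 * consumed, 0 meaning "no argument" (which the macros turn into -EINVAL).
 * len is unused because the buffer is NUL-terminated. */
static int my_get_int_arg(const char *ptr, int len, unsigned long *pv)
{
	char *end;

	*pv = simple_strtoul(ptr, &end, 10);
	return end - ptr;
}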
1027 if (len > 0 && ptr[len-1] == '\n') in sym_user_command()
1028 --len; in sym_user_command()
1031 uc->cmd = UC_SETSYNC; in sym_user_command()
1033 uc->cmd = UC_SETTAGS; in sym_user_command()
1035 uc->cmd = UC_SETVERBOSE; in sym_user_command()
1037 uc->cmd = UC_SETWIDE; in sym_user_command()
1040 uc->cmd = UC_SETDEBUG; in sym_user_command()
1043 uc->cmd = UC_SETFLAG; in sym_user_command()
1045 uc->cmd = UC_RESETDEV; in sym_user_command()
1047 uc->cmd = UC_CLEARDEV; in sym_user_command()
1052 printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd); in sym_user_command()
1056 return -EINVAL; in sym_user_command()
1057 ptr += arg_len; len -= arg_len; in sym_user_command()
1059 switch(uc->cmd) { in sym_user_command()
1068 ptr += arg_len; len -= arg_len; in sym_user_command()
1069 uc->target = ~0; in sym_user_command()
1072 uc->target = (1<<target); in sym_user_command()
1080 switch(uc->cmd) { in sym_user_command()
1086 GET_INT_ARG(ptr, len, uc->data); in sym_user_command()
1088 printk("sym_user_command: data=%ld\n", uc->data); in sym_user_command()
1096 uc->data |= DEBUG_ALLOC; in sym_user_command()
1098 uc->data |= DEBUG_PHASE; in sym_user_command()
1100 uc->data |= DEBUG_QUEUE; in sym_user_command()
1102 uc->data |= DEBUG_RESULT; in sym_user_command()
1104 uc->data |= DEBUG_SCATTER; in sym_user_command()
1106 uc->data |= DEBUG_SCRIPT; in sym_user_command()
1108 uc->data |= DEBUG_TINY; in sym_user_command()
1110 uc->data |= DEBUG_TIMING; in sym_user_command()
1112 uc->data |= DEBUG_NEGO; in sym_user_command()
1114 uc->data |= DEBUG_TAGS; in sym_user_command()
1116 uc->data |= DEBUG_POINTER; in sym_user_command()
1118 return -EINVAL; in sym_user_command()
1119 ptr += arg_len; len -= arg_len; in sym_user_command()
1122 printk("sym_user_command: data=%ld\n", uc->data); in sym_user_command()
1130 uc->data &= ~SYM_DISC_ENABLED; in sym_user_command()
1132 return -EINVAL; in sym_user_command()
1133 ptr += arg_len; len -= arg_len; in sym_user_command()
1141 return -EINVAL; in sym_user_command()
1145 spin_lock_irqsave(shost->host_lock, flags); in sym_user_command()
1147 spin_unlock_irqrestore(shost->host_lock, flags); in sym_user_command()
1162 struct pci_dev *pdev = sym_data->pdev; in sym_show_info()
1163 struct sym_hcb *np = sym_data->ncb; in sym_show_info()
1166 "revision id 0x%x\n", np->s.chip_name, in sym_show_info()
1167 pdev->device, pdev->revision); in sym_show_info()
1168 seq_printf(m, "At PCI address %s, IRQ %u\n", in sym_show_info()
1169 pci_name(pdev), pdev->irq); in sym_show_info()
1171 (int) (np->minsync_dt ? np->minsync_dt : np->minsync), in sym_show_info()
1172 np->maxwide ? "Wide" : "Narrow", in sym_show_info()
1173 np->minsync_dt ? ", DT capable" : ""); in sym_show_info()
1181 return -EINVAL; in sym_show_info()
1194 if (device->s.ioaddr) in sym_iounmap_device()
1195 pci_iounmap(device->pdev, device->s.ioaddr); in sym_iounmap_device()
1196 if (device->s.ramaddr) in sym_iounmap_device()
1197 pci_iounmap(device->pdev, device->s.ramaddr); in sym_iounmap_device()
1210 free_irq(pdev->irq, np->s.host); in sym_free_resources()
1211 if (np->s.ioaddr) in sym_free_resources()
1212 pci_iounmap(pdev, np->s.ioaddr); in sym_free_resources()
1213 if (np->s.ramaddr) in sym_free_resources()
1214 pci_iounmap(pdev, np->s.ramaddr); in sym_free_resources()
1224 * Host attach and initialisations.
1226 * Allocate host data and ncb structure.
1238 struct pci_dev *pdev = dev->pdev; in sym_attach()
1243 printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n", in sym_attach()
1244 unit, dev->chip.name, pdev->revision, pci_name(pdev), in sym_attach()
1245 pdev->irq); in sym_attach()
1250 fw = sym_find_firmware(&dev->chip); in sym_attach()
1260 * Allocate immediately the host control block, in sym_attach()
1265 np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); in sym_attach()
1268 np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ in sym_attach()
1269 sym_data->ncb = np; in sym_attach()
1270 sym_data->pdev = pdev; in sym_attach()
1271 np->s.host = shost; in sym_attach()
1278 np->hcb_ba = vtobus(np); in sym_attach()
1279 np->verbose = sym_driver_setup.verbose; in sym_attach()
1280 np->s.unit = unit; in sym_attach()
1281 np->features = dev->chip.features; in sym_attach()
1282 np->clock_divn = dev->chip.nr_divisor; in sym_attach()
1283 np->maxoffs = dev->chip.offset_max; in sym_attach()
1284 np->maxburst = dev->chip.burst_max; in sym_attach()
1285 np->myaddr = dev->host_id; in sym_attach()
1286 np->mmio_ba = (u32)dev->mmio_base; in sym_attach()
1287 np->ram_ba = (u32)dev->ram_base; in sym_attach()
1288 np->s.ioaddr = dev->s.ioaddr; in sym_attach()
1289 np->s.ramaddr = dev->s.ramaddr; in sym_attach()
1294 strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); in sym_attach()
1295 sprintf(np->s.inst_name, "sym%d", np->s.unit); in sym_attach()
1297 if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) && in sym_attach()
1298 !dma_set_mask(&pdev->dev, DMA_DAC_MASK)) { in sym_attach()
1300 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { in sym_attach()
1305 if (sym_hcb_attach(shost, fw, dev->nvram)) in sym_attach()
1313 if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, in sym_attach()
1316 sym_name(np), pdev->irq); in sym_attach()
1325 spin_lock_irqsave(shost->host_lock, flags); in sym_attach()
1337 timer_setup(&np->s.timer, sym53c8xx_timer, 0); in sym_attach()
1338 np->s.lasttime=0; in sym_attach()
1342 * Fill Linux host instance structure in sym_attach()
1345 shost->max_channel = 0; in sym_attach()
1346 shost->this_id = np->myaddr; in sym_attach()
1347 shost->max_id = np->maxwide ? 16 : 8; in sym_attach()
1348 shost->max_lun = SYM_CONF_MAX_LUN; in sym_attach()
1349 shost->unique_id = pci_resource_start(pdev, 0); in sym_attach()
1350 shost->cmd_per_lun = SYM_CONF_MAX_TAG; in sym_attach()
1351 shost->can_queue = (SYM_CONF_MAX_START-2); in sym_attach()
1352 shost->sg_tablesize = SYM_CONF_MAX_SG; in sym_attach()
1353 shost->max_cmd_len = 16; in sym_attach()
1355 shost->transportt = sym2_transport_template; in sym_attach()
1358 if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2) in sym_attach()
1359 shost->dma_boundary = 0xFFFFFF; in sym_attach()
1361 spin_unlock_irqrestore(shost->host_lock, flags); in sym_attach()
1366 printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " in sym_attach()
1368 spin_unlock_irqrestore(shost->host_lock, flags); in sym_attach()
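sym_attach (lines 1238-1368) allocates the DMA-able host control block, picks the DMA addressing mode (a DAC-capable chip tries a wide mask first, otherwise plain 32-bit, lines 1297-1300), installs the shared IRQ handler and fills in the Scsi_Host limits before unlocking. A condensed sketch of the DMA-mask choice and host-limit fill, using the modern dma_set_mask_and_coherent() helper; the numeric limits are illustrative, not the driver's SYM_CONF_* values:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>

/* Sketch: choose a DMA mask and set the basic Scsi_Host limits. */
static int my_attach_setup(struct Scsi_Host *shost, struct pci_dev *pdev,
			   bool chip_has_dac)
{
	if (!chip_has_dac ||
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
			return -ENODEV;		/* no usable DMA addressing */
	}

	shost->max_id	   = 16;		/* 16 IDs on a wide bus, 8 narrow */
	shost->max_lun	   = 64;		/* illustrative, cf. SYM_CONF_MAX_LUN */
	shost->max_cmd_len = 16;
	shost->unique_id   = pci_resource_start(pdev, 0);
	return 0;
}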
1388 devp->nvram = nvp; in sym_get_nvram()
1389 nvp->type = 0; in sym_get_nvram()
1402 struct pci_dev *pdev = device->pdev; in sym_check_supported()
1413 return -ENODEV; in sym_check_supported()
1422 chip = sym_lookup_chip_table(pdev->device, pdev->revision); in sym_check_supported()
1424 dev_info(&pdev->dev, "device not supported\n"); in sym_check_supported()
1425 return -ENODEV; in sym_check_supported()
1427 memcpy(&device->chip, chip, sizeof(device->chip)); in sym_check_supported()
1434 * These controllers set value 0x52414944 at RAM end - 16.
1440 if (!device->s.ramaddr) in sym_check_raid()
1443 if (device->chip.features & FE_RAM8K) in sym_check_raid()
1448 ram_val = readl(device->s.ramaddr + ram_size - 16); in sym_check_raid()
1452 dev_info(&device->pdev->dev, in sym_check_raid()
1454 return -ENODEV; in sym_check_raid()
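sym_check_raid (lines 1440-1454) reads the longword 16 bytes before the end of the chip's on-board RAM: boards whose firmware plants the ASCII signature "RAID" (0x52414944) there belong to a RAID controller and are refused. A sketch of that probe over an already-iomapped RAM window; the function name and message text are illustrative:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

/* Sketch: skip boards whose on-chip SRAM carries a RAID firmware
 * signature 16 bytes before the end of the RAM window. */
static int my_check_raid(struct pci_dev *pdev, void __iomem *ramaddr,
			 resource_size_t ram_size)
{
	u32 ram_val = readl(ramaddr + ram_size - 16);

	if (ram_val != 0x52414944)		/* "RAID" in ASCII */
		return 0;

	dev_info(&pdev->dev, "not initializing, board is driven by a RAID controller\n");
	return -ENODEV;
}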
1459 struct sym_chip *chip = &device->chip; in sym_set_workarounds()
1460 struct pci_dev *pdev = device->pdev; in sym_set_workarounds()
1468 if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) { in sym_set_workarounds()
1469 chip->features |= (FE_WRIE | FE_CLSE); in sym_set_workarounds()
1473 if (chip->features & FE_WRIE) { in sym_set_workarounds()
1475 return -ENODEV; in sym_set_workarounds()
1485 * Recall: writes are not normal to status register - in sym_set_workarounds()
1490 if (chip->features & FE_66MHZ) { in sym_set_workarounds()
1492 chip->features &= ~FE_66MHZ; in sym_set_workarounds()
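The comment at line 1485 refers to the PCI status register being write-one-to-clear: a latched error bit is cleared by writing that same bit back, and the 66MHz feature is dropped when the bus does not actually report 66MHz capability (lines 1490-1492). A sketch of the write-one-to-clear step using the standard PCI_STATUS_* bit names (together they form the 0xf900 mask seen earlier):

#include <linux/pci.h>

/* Sketch: clear any latched error bits in PCI_STATUS.  These bits are
 * RW1C, so writing back only the error bits clears them without
 * disturbing the read-only capability bits. */
static void my_clear_pci_status_errors(struct pci_dev *pdev)
{
	u16 status, err;

	pci_read_config_word(pdev, PCI_STATUS, &status);
	err = status & (PCI_STATUS_DETECTED_PARITY |
			PCI_STATUS_SIG_SYSTEM_ERROR |
			PCI_STATUS_REC_MASTER_ABORT |
			PCI_STATUS_REC_TARGET_ABORT |
			PCI_STATUS_SIG_TARGET_ABORT |
			PCI_STATUS_PARITY);
	if (err)
		pci_write_config_word(pdev, PCI_STATUS, err);
}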
1505 * Map HBA registers and on-chip SRAM (if present).
1509 struct pci_dev *pdev = device->pdev; in sym_iomap_device()
1513 pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]); in sym_iomap_device()
1514 device->mmio_base = bus_addr.start; in sym_iomap_device()
1516 if (device->chip.features & FE_RAM) { in sym_iomap_device()
1518 * If the BAR is 64-bit, resource 2 will be occupied by the in sym_iomap_device()
1521 if (!pdev->resource[i].flags) in sym_iomap_device()
1523 pcibios_resource_to_bus(pdev->bus, &bus_addr, in sym_iomap_device()
1524 &pdev->resource[i]); in sym_iomap_device()
1525 device->ram_base = bus_addr.start; in sym_iomap_device()
1529 if (device->mmio_base) in sym_iomap_device()
1530 device->s.ioaddr = pci_iomap(pdev, 1, in sym_iomap_device()
1533 if (!device->s.ioaddr) in sym_iomap_device()
1534 device->s.ioaddr = pci_iomap(pdev, 0, in sym_iomap_device()
1536 if (!device->s.ioaddr) { in sym_iomap_device()
1537 dev_err(&pdev->dev, "could not map registers; giving up.\n"); in sym_iomap_device()
1538 return -EIO; in sym_iomap_device()
1540 if (device->ram_base) { in sym_iomap_device()
1541 device->s.ramaddr = pci_iomap(pdev, i, in sym_iomap_device()
1543 if (!device->s.ramaddr) { in sym_iomap_device()
1544 dev_warn(&pdev->dev, in sym_iomap_device()
1546 device->ram_base = 0; in sym_iomap_device()
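sym_iomap_device (lines 1509-1546) maps BAR 1 (the MMIO register window, falling back to the BAR 0 I/O ports) and, for chips with on-board script RAM, the RAM BAR as well; a 64-bit RAM BAR occupies two resource slots, which is why the BAR index is probed before mapping. A minimal sketch of the map/unmap pair with pci_iomap():

#include <linux/pci.h>

/* Sketch: map the register BAR (MMIO, with I/O-port fallback).
 * pci_iomap() with maxlen 0 maps the whole BAR. */
static void __iomem *my_map_regs(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 0);

	if (!ioaddr)
		ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		dev_err(&pdev->dev, "could not map registers\n");
	return ioaddr;
}

static void my_unmap_regs(struct pci_dev *pdev, void __iomem *ioaddr)
{
	if (ioaddr)
		pci_iounmap(pdev, ioaddr);
}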
1572 struct pci_dev *memc = pci_get_slot(pdev->bus, slot); in sym_config_pqs()
1574 if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) { in sym_config_pqs()
1598 sym_dev->host_id = tmp; in sym_config_pqs()
1603 * Detach the host.
1611 del_timer_sync(&np->s.timer); in sym_detach()
1631 * Driver host template.
1703 if (scsi_add_host(shost, &pdev->dev)) in sym2_probe()
1721 return -ENODEV; in sym2_probe()
1733 attach_count--; in sym2_remove()
1737 * sym2_io_error_detected() - called when PCI error is detected
1738 * @pdev: pointer to PCI device
1739 * @state: current state of the PCI slot
1750 disable_irq(pdev->irq); in sym2_io_error_detected()
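sym2_io_error_detected() is the first stage of PCI error recovery: quiesce the driver (here by disabling the IRQ, line 1750) and tell the core whether the slot should be reset or the device given up on. A sketch of the usual decision; the body is an assumption modelled on the fragment, not its literal code:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: first-stage AER/EEH callback.  A permanently failed channel is
 * abandoned, anything else asks the core for a slot reset. */
static pci_ers_result_t my_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	disable_irq(pdev->irq);		/* keep the ISR off dead hardware */
	return PCI_ERS_RESULT_NEED_RESET;
}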
1758 * sym2_io_slot_dump - Enable MMIO and dump debug registers
1759 * @pdev: pointer to PCI device
1772 * sym2_reset_workarounds - hardware-specific work-arounds
1773 * @pdev: pointer to PCI device
1778 * of the steps taken there are un-needed here.
1785 chip = sym_lookup_chip_table(pdev->device, pdev->revision); in sym2_reset_workarounds()
1791 if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) { in sym2_reset_workarounds()
1799 * sym2_io_slot_reset() - called when the pci bus has been reset.
1800 * @pdev: pointer to PCI device
1809 printk(KERN_INFO "%s: recovering from a PCI slot reset\n", in sym2_io_slot_reset()
1813 printk(KERN_ERR "%s: Unable to enable after PCI reset\n", in sym2_io_slot_reset()
1819 enable_irq(pdev->irq); in sym2_io_slot_reset()
1822 if (np->features & FE_WRIE) { in sym2_io_slot_reset()
1827 /* Perform work-arounds, analogous to sym_set_workarounds() */ in sym2_io_slot_reset()
1830 /* Perform host reset only on one instance of the card */ in sym2_io_slot_reset()
1831 if (PCI_FUNC(pdev->devfn) == 0) { in sym2_io_slot_reset()
1833 printk(KERN_ERR "%s: Unable to reset scsi host\n", in sym2_io_slot_reset()
1844 * sym2_io_resume() - resume normal ops after PCI reset
1845 * @pdev: pointer to PCI device
1856 spin_lock_irq(shost->host_lock); in sym2_io_resume()
1857 if (sym_data->io_reset) in sym2_io_resume()
1858 complete(sym_data->io_reset); in sym2_io_resume()
1859 spin_unlock_irq(shost->host_lock); in sym2_io_resume()
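The callbacks above (error_detected, the mmio_enabled dump stage, slot_reset and resume) are tied together through a struct pci_error_handlers hung off the pci_driver; ->resume() is where the completion parked by the EH thread (sym_data->io_reset, lines 1857-1858) is finally signalled. A sketch of the wiring with stub callbacks; all my_* names are hypothetical, and my_io_error_detected is the one from the sketch above:

#include <linux/interrupt.h>
#include <linux/pci.h>

static pci_ers_result_t my_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state);

/* Sketch: re-enable the device after the slot reset and report success. */
static pci_ers_result_t my_io_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	enable_irq(pdev->irq);
	return PCI_ERS_RESULT_RECOVERED;
}

/* Sketch: final stage - wake anyone waiting for recovery to finish,
 * cf. the complete(sym_data->io_reset) call in the fragment above. */
static void my_io_resume(struct pci_dev *pdev)
{
}

static const struct pci_error_handlers my_err_handler = {
	.error_detected	= my_io_error_detected,
	.slot_reset	= my_io_slot_reset,
	.resume		= my_io_resume,
};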
1867 switch (np->scsi_mode) { in sym2_get_signalling()
1886 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in sym2_set_offset()
1888 struct sym_tcb *tp = &np->target[starget->id]; in sym2_set_offset()
1890 tp->tgoal.offset = offset; in sym2_set_offset()
1891 tp->tgoal.check_nego = 1; in sym2_set_offset()
1896 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in sym2_set_period()
1898 struct sym_tcb *tp = &np->target[starget->id]; in sym2_set_period()
1902 if (period <= np->minsync && spi_width(starget)) in sym2_set_period()
1903 tp->tgoal.dt = 1; in sym2_set_period()
1905 tp->tgoal.period = period; in sym2_set_period()
1906 tp->tgoal.check_nego = 1; in sym2_set_period()
1911 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in sym2_set_width()
1913 struct sym_tcb *tp = &np->target[starget->id]; in sym2_set_width()
1918 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; in sym2_set_width()
1920 tp->tgoal.width = width; in sym2_set_width()
1921 tp->tgoal.check_nego = 1; in sym2_set_width()
1926 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in sym2_set_dt()
1928 struct sym_tcb *tp = &np->target[starget->id]; in sym2_set_dt()
1932 tp->tgoal.dt = 1; in sym2_set_dt()
1934 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; in sym2_set_dt()
1935 tp->tgoal.check_nego = 1; in sym2_set_dt()
1941 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1943 struct sym_tcb *tp = &np->target[starget->id];
1946 tp->tgoal.iu = tp->tgoal.dt = 1;
1948 tp->tgoal.iu = 0;
1949 tp->tgoal.check_nego = 1;
1954 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1956 struct sym_tcb *tp = &np->target[starget->id];
1959 tp->tgoal.dt = tp->tgoal.qas = 1;
1961 tp->tgoal.qas = 0;
1962 tp->tgoal.check_nego = 1;
2022 MODULE_DEVICE_TABLE(pci, sym2_id_table);
2046 return -ENODEV; in sym2_init()