Lines matching defs:ioa_cfg (cross-reference of drivers/scsi/ipr.c; the leading number on each entry is its line number in the source file)
586 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
589 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
590 trace_entry = &ioa_cfg->trace[trace_index];
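The matches at 586-590 are the driver trace ring: a free-running atomic counter is bumped with atomic_add_return() and masked with IPR_TRACE_INDEX_MASK, so concurrent CPUs each claim a distinct slot in a power-of-two ring without taking a lock. A minimal C11 model of that slot claim (the mask value here is hypothetical; the real IPR_TRACE_INDEX_MASK is not part of this listing):

    #include <stdatomic.h>

    #define TRACE_INDEX_MASK 0xffu /* hypothetical; ring size must be a power of two */

    struct trace_entry { unsigned int op_code; };

    static struct trace_entry trace[TRACE_INDEX_MASK + 1];
    static atomic_uint trace_index;

    /* Same slot claim as lines 586-590: atomic_fetch_add() returns the
     * old value, so the +1 reproduces the kernel's atomic_add_return();
     * the mask wraps the free-running counter into the ring. */
    static struct trace_entry *claim_trace_slot(void)
    {
        unsigned int idx = (atomic_fetch_add(&trace_index, 1) + 1) & TRACE_INDEX_MASK;
        return &trace[idx];
    }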
613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
615 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
617 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
642 if (ipr_cmd->ioa_cfg->sis64) {
701 * @ioa_cfg: ioa config struct
707 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
710 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
717 * @ioa_cfg: ioa config struct
726 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
732 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
733 spin_lock(&ioa_cfg->hrrq[i]._lock);
734 ioa_cfg->hrrq[i].allow_interrupts = 0;
735 spin_unlock(&ioa_cfg->hrrq[i]._lock);
739 if (ioa_cfg->sis64)
740 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
742 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
745 if (ioa_cfg->sis64)
746 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
747 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
748 readl(ioa_cfg->regs.sense_interrupt_reg);
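Lines 726-748 are ipr_mask_and_clear_interrupts(): the software side is stopped first (allow_interrupts cleared under each queue's _lock), then every interrupt source is masked at the adapter, then the sense register is read back so the posted mask write is known to have reached the hardware. A runnable userspace sketch of that ordering; reg_write()/reg_read() are hypothetical stand-ins for writel()/readl(), and the SIS-64 writeq variant from line 740 is omitted:

    #include <pthread.h>
    #include <stdint.h>

    #define HRRQ_NUM 4 /* hypothetical queue count */

    struct hrrq {
        pthread_mutex_t lock;
        int allow_interrupts;
    };

    static struct hrrq hrrq[HRRQ_NUM] = {
        { PTHREAD_MUTEX_INITIALIZER, 1 }, { PTHREAD_MUTEX_INITIALIZER, 1 },
        { PTHREAD_MUTEX_INITIALIZER, 1 }, { PTHREAD_MUTEX_INITIALIZER, 1 },
    };
    static volatile uint32_t set_interrupt_mask_reg;
    static volatile uint32_t sense_interrupt_reg;

    /* Hypothetical stand-ins for the kernel's writel()/readl(). */
    static void reg_write(volatile uint32_t *reg, uint32_t val) { *reg = val; }
    static uint32_t reg_read(volatile uint32_t *reg) { return *reg; }

    /* Ordering from lines 726-748: stop the handlers, mask at the
     * chip, then read back to flush the posted mask write. */
    static void mask_and_clear_interrupts(void)
    {
        for (int i = 0; i < HRRQ_NUM; i++) {
            pthread_mutex_lock(&hrrq[i].lock);
            hrrq[i].allow_interrupts = 0;
            pthread_mutex_unlock(&hrrq[i].lock);
        }
        reg_write(&set_interrupt_mask_reg, ~0u);
        (void)reg_read(&sense_interrupt_reg); /* flush the posted write */
    }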
753 * @ioa_cfg: ioa config struct
758 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
760 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
766 rc = pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
767 &ioa_cfg->saved_pcix_cmd_reg);
769 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
773 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
779 * @ioa_cfg: ioa config struct
784 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
786 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
790 rc = pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
791 ioa_cfg->saved_pcix_cmd_reg);
793 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
847 * @ioa_cfg: ioa config struct
854 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
860 for_each_hrrq(hrrq, ioa_cfg) {
897 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
900 if (ioa_cfg->sis64) {
908 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
910 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
983 if (ipr_cmd->ioa_cfg->sis64) {
1020 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1025 spin_unlock_irq(ioa_cfg->host->host_lock);
1027 spin_lock_irq(ioa_cfg->host->host_lock);
1030 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1034 if (ioa_cfg->hrrq_num == 1)
1037 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1038 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
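ipr_get_hrrq_index() (1030-1038) is the queue-selection policy: with a single queue everything goes to index 0, otherwise an atomic counter round-robins I/O over queues 1..hrrq_num-1, keeping queue 0 (IPR_INIT_HRRQ elsewhere in these matches) out of the fast path. A self-contained model using C11 atomics; atomic_fetch_add() returns the old value, so the +1 reproduces the kernel's atomic_add_return():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int hrrq_index;

    /* Mirrors lines 1030-1038: queue 0 is never handed out when more
     * than one queue exists; everything else cycles over 1..n-1. */
    static int get_hrrq_index(int hrrq_num)
    {
        if (hrrq_num == 1)
            return 0;
        int hrrq = atomic_fetch_add(&hrrq_index, 1) + 1;
        return (hrrq % (hrrq_num - 1)) + 1;
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            printf("%d ", get_hrrq_index(4)); /* prints 2 3 1 2 3 1 2 3 */
        printf("\n");
        return 0;
    }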
1045 * @ioa_cfg: ioa config struct
1056 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1062 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1063 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1065 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1089 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1105 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1116 if (ioa_cfg->sis64) {
1131 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1139 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1140 ioa_cfg->max_devs_supported);
1141 set_bit(res->target, ioa_cfg->target_ids);
1148 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1149 ioa_cfg->max_devs_supported);
1150 set_bit(res->target, ioa_cfg->array_ids);
1153 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1154 ioa_cfg->max_devs_supported);
1155 set_bit(res->target, ioa_cfg->vset_ids);
1157 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1158 ioa_cfg->max_devs_supported);
1159 set_bit(res->target, ioa_cfg->target_ids);
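Lines 1139-1159 repeat one allocation idiom three times: take the first clear bit in the per-type ID bitmap (target_ids, array_ids, or vset_ids) with find_first_zero_bit() and claim it with set_bit(). A hedged single-word sketch of that step; MAX_DEVS is a stand-in for ioa_cfg->max_devs_supported, kept within one unsigned long:

    #define MAX_DEVS 32 /* stand-in; kept within one unsigned long */

    /* Single-word, open-coded equivalent of the kernel's
     * find_first_zero_bit(); returns "size" when the map is full,
     * matching the kernel convention. */
    static int find_first_zero_bit(unsigned long map, int size)
    {
        for (int bit = 0; bit < size; bit++)
            if (!(map & (1UL << bit)))
                return bit;
        return size;
    }

    /* The idiom from 1139-1159: the first free ID in the bitmap
     * becomes the SCSI target number and is claimed immediately
     * (set_bit() here is a plain OR). */
    static int alloc_target_id(unsigned long *ids)
    {
        int target = find_first_zero_bit(*ids, MAX_DEVS);
        if (target < MAX_DEVS)
            *ids |= 1UL << target;
        return target;
    }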
1187 if (res->ioa_cfg->sis64) {
1228 * @ioa_cfg: ioa config struct
1236 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1242 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1261 if (res->ioa_cfg->sis64) {
1285 ipr_format_res_path(res->ioa_cfg,
1313 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1315 if (!ioa_cfg->sis64)
1319 clear_bit(res->target, ioa_cfg->array_ids);
1321 clear_bit(res->target, ioa_cfg->vset_ids);
1323 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1326 clear_bit(res->target, ioa_cfg->target_ids);
1329 clear_bit(res->target, ioa_cfg->target_ids);
1334 * @ioa_cfg: ioa config struct
1340 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1349 if (ioa_cfg->sis64) {
1357 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1365 if (list_empty(&ioa_cfg->free_res_q)) {
1366 ipr_send_hcam(ioa_cfg,
1372 res = list_entry(ioa_cfg->free_res_q.next,
1377 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1386 schedule_work(&ioa_cfg->work_q);
1389 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1393 schedule_work(&ioa_cfg->work_q);
1396 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1411 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1421 dev_err(&ioa_cfg->pdev->dev,
1424 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1426 ipr_handle_config_change(ioa_cfg, hostrcb);
1534 * @ioa_cfg: ioa config struct
1540 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1545 if (ioa_cfg->sis64)
1570 * @ioa_cfg: ioa config struct
1576 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1602 * @ioa_cfg: ioa config struct
1608 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1626 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1642 * @ioa_cfg: ioa config struct
1648 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1685 * @ioa_cfg: ioa config struct
1691 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1709 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1732 * @ioa_cfg: ioa config struct
1738 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1752 ioa_cfg->host->host_no,
1773 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1774 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1783 * @ioa_cfg: ioa config struct
1789 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1803 ioa_cfg->host->host_no,
1823 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1824 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1838 * @ioa_cfg: ioa config struct
1845 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1852 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1866 * @ioa_cfg: ioa config struct
1872 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1877 if (ioa_cfg->sis64)
1888 ipr_log_hex_data(ioa_cfg, error->data,
1896 * @ioa_cfg: ioa config struct
1902 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1914 ipr_log_hex_data(ioa_cfg, error->data,
2015 ipr_format_res_path(hostrcb->ioa_cfg,
2023 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2165 ipr_format_res_path(hostrcb->ioa_cfg,
2175 ipr_format_res_path(hostrcb->ioa_cfg,
2183 * @ioa_cfg: ioa config struct
2189 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2215 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2220 * @ioa_cfg: ioa config struct
2226 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2241 ipr_format_res_path(ioa_cfg, error->last_res_path,
2263 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2266 ipr_format_res_path(ioa_cfg,
2276 * @ioa_cfg: ioa config struct
2282 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2309 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2314 * @ioa_cfg: ioa config struct
2320 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2329 ipr_log_hex_data(ioa_cfg, error->data,
2336 * @ioa_cfg: ioa config struct
2342 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2345 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2351 * @ioa_cfg: ioa config struct
2357 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2377 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2379 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2382 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2409 * @ioa_cfg: ioa config struct
2417 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2428 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2430 if (ioa_cfg->sis64)
2435 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2438 scsi_report_bus_reset(ioa_cfg->host,
2452 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2459 ioa_cfg->errors_logged++;
2461 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2468 ipr_log_cache_error(ioa_cfg, hostrcb);
2471 ipr_log_config_error(ioa_cfg, hostrcb);
2475 ipr_log_array_error(ioa_cfg, hostrcb);
2478 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2481 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2484 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2488 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2491 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2494 ipr_log_fabric_error(ioa_cfg, hostrcb);
2497 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2500 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2504 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2507 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2510 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2515 ipr_log_generic_error(ioa_cfg, hostrcb);
2550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2555 if (ioa_cfg->sis64)
2564 ipr_handle_log_data(ioa_cfg, hostrcb);
2566 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2569 dev_err(&ioa_cfg->pdev->dev,
2573 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2574 schedule_work(&ioa_cfg->work_q);
2575 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2577 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2597 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2599 ioa_cfg->errors_logged++;
2600 dev_err(&ioa_cfg->pdev->dev,
2603 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2604 ioa_cfg->sdt_state = GET_DUMP;
2606 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2607 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2609 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2632 ioa_cfg->errors_logged++;
2633 dev_err(&ioa_cfg->pdev->dev,
2636 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2637 ioa_cfg->sdt_state = GET_DUMP;
2639 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2641 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2642 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2684 * @ioa_cfg: ioa config struct
2694 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2701 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2719 * @ioa_cfg: ioa config struct
2727 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2734 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2752 * @ioa_cfg: ioa config struct
2760 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2767 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2768 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2777 * @ioa_cfg: ioa config struct
2785 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2792 if (ioa_cfg->sis64)
2793 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2798 ioa_cfg->regs.set_uproc_interrupt_reg32);
2801 if (ipr_wait_iodbg_ack(ioa_cfg,
2803 dev_err(&ioa_cfg->pdev->dev,
2810 ioa_cfg->regs.clr_interrupt_reg);
2813 writel(start_addr, ioa_cfg->ioa_mailbox);
2817 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2821 if (ipr_wait_iodbg_ack(ioa_cfg,
2823 dev_err(&ioa_cfg->pdev->dev,
2829 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2836 ioa_cfg->regs.clr_interrupt_reg);
2842 ioa_cfg->regs.set_uproc_interrupt_reg32);
2845 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2849 ioa_cfg->regs.clr_interrupt_reg);
2854 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2869 * @ioa_cfg: ioa config struct
2878 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2885 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2887 if (ioa_cfg->sis64)
2913 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2914 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2917 rc = ipr_get_ldump_data_section(ioa_cfg,
2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2954 * @ioa_cfg: ioa config struct
2960 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2963 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2971 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2980 * @ioa_cfg: ioa config struct
2986 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3001 * @ioa_cfg: ioa config struct
3007 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3016 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3022 * @ioa_cfg: ioa config struct
3028 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3037 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3043 * @ioa_cfg: ioa config struct
3049 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3063 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3065 if (ioa_cfg->sdt_state != READ_DUMP) {
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3070 if (ioa_cfg->sis64) {
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3076 start_addr = readl(ioa_cfg->ioa_mailbox);
3078 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3079 dev_err(&ioa_cfg->pdev->dev,
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3085 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3097 ipr_dump_version_data(ioa_cfg, driver_dump);
3098 ipr_dump_location_data(ioa_cfg, driver_dump);
3099 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3100 ipr_dump_trace_data(ioa_cfg, driver_dump);
3117 if (ioa_cfg->sis64) {
3127 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3133 dev_err(&ioa_cfg->pdev->dev,
3137 ioa_cfg->sdt_state = DUMP_OBTAINED;
3138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149 if (ioa_cfg->sis64)
3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3164 if (ioa_cfg->sis64)
3182 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3195 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3200 ioa_cfg->sdt_state = DUMP_OBTAINED;
3205 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3218 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224 ioa_cfg->dump = NULL;
3225 ioa_cfg->sdt_state = INACTIVE;
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241 struct ipr_ioa_cfg *ioa_cfg =
3247 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3252 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3263 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3266 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3276 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3282 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3283 scsi_add_device(ioa_cfg->host, bus, target, lun);
3284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3289 ioa_cfg->scan_done = 1;
3290 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3291 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3310 struct ipr_ioa_cfg *ioa_cfg =
3314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3316 if (ioa_cfg->sdt_state == READ_DUMP) {
3317 dump = ioa_cfg->dump;
3319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3324 ipr_get_ioa_dump(ioa_cfg, dump);
3327 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3328 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3329 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3334 if (ioa_cfg->scsi_unblock) {
3335 ioa_cfg->scsi_unblock = 0;
3336 ioa_cfg->scsi_blocked = 0;
3337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3338 scsi_unblock_requests(ioa_cfg->host);
3339 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340 if (ioa_cfg->scsi_blocked)
3341 scsi_block_requests(ioa_cfg->host);
3344 if (!ioa_cfg->scan_enabled) {
3345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3349 schedule_work(&ioa_cfg->scsi_add_work_q);
3351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3379 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3381 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3409 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3410 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3419 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3444 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3448 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3449 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3469 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3473 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3505 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3512 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3513 while (ioa_cfg->in_reset_reload) {
3514 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3515 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3516 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3519 ioa_cfg->errors_logged = 0;
3520 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3522 if (ioa_cfg->in_reset_reload) {
3523 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3524 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3529 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3534 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3536 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
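The store_reset path at 3512-3536 shows a wait pattern used throughout the driver: drop host_lock before sleeping on reset_wait_q, re-take it afterwards, and re-check in_reset_reload, since another reset may have started in between. A condensed pthread model of the same rule; pthread_cond_wait() releases and re-acquires the mutex itself, which folds the kernel's explicit unlock / wait_event() / lock sequence into one call:

    #include <pthread.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t reset_wait_q = PTHREAD_COND_INITIALIZER;
    static int in_reset_reload;

    /* Shape of the loop at 3512-3516: never sleep while holding the
     * host lock, and always re-test the flag after each wakeup. */
    static void wait_for_reset_reload(void)
    {
        pthread_mutex_lock(&host_lock);
        while (in_reset_reload)
            pthread_cond_wait(&reset_wait_q, &host_lock);
        /* ...proceed under host_lock with no reset in flight... */
        pthread_mutex_unlock(&host_lock);
    }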
3562 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3566 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3567 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3571 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3592 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3599 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3600 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3602 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3603 spin_lock(&ioa_cfg->hrrq[i]._lock);
3604 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3605 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3608 ioa_cfg->reset_retries = 0;
3609 ioa_cfg->in_ioa_bringdown = 0;
3610 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3612 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3613 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3644 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3651 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3652 if (!ioa_cfg->in_reset_reload)
3653 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3654 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3655 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3682 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3687 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3708 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3713 if (!ioa_cfg->sis64) {
3714 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3721 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3725 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3726 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
3730 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3731 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3732 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3736 ioa_cfg->iopoll_weight = user_iopoll_weight;
3737 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3738 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3739 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3740 ioa_cfg->iopoll_weight, ipr_iopoll);
3929 * @ioa_cfg: ioa config struct
3937 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3943 while (ioa_cfg->in_reset_reload) {
3944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3945 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3946 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3949 if (ioa_cfg->ucode_sglist) {
3950 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3951 dev_err(&ioa_cfg->pdev->dev,
3956 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3962 dev_err(&ioa_cfg->pdev->dev,
3967 ioa_cfg->ucode_sglist = sglist;
3968 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3970 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3972 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3973 ioa_cfg->ucode_sglist = NULL;
3974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3995 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4013 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4014 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4025 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4033 dev_err(&ioa_cfg->pdev->dev,
4040 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4071 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4076 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4095 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4100 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4101 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4104 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4119 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4123 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4124 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4127 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4132 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4180 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4190 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4191 dump = ioa_cfg->dump;
4193 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4194 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4198 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4224 if (ioa_cfg->sis64)
4265 * @ioa_cfg: ioa config struct
4270 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4283 if (ioa_cfg->sis64)
4299 dump->ioa_cfg = ioa_cfg;
4301 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4303 if (INACTIVE != ioa_cfg->sdt_state) {
4304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4310 ioa_cfg->dump = dump;
4311 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4312 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4313 ioa_cfg->dump_taken = 1;
4314 schedule_work(&ioa_cfg->work_q);
4316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4323 * @ioa_cfg: ioa config struct
4328 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4336 dump = ioa_cfg->dump;
4338 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4342 ioa_cfg->dump = NULL;
4343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4369 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4376 rc = ipr_alloc_dump(ioa_cfg);
4378 rc = ipr_free_dump(ioa_cfg);
4398 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4427 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4432 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4467 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4469 if (res && ioa_cfg->sis64)
4474 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4477 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4506 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4508 if (res && ioa_cfg->sis64)
4513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4537 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4542 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4548 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4573 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4584 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4690 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4693 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4711 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4713 if (ioa_cfg->sis64) {
4716 clear_bit(starget->id, ioa_cfg->array_ids);
4718 clear_bit(starget->id, ioa_cfg->vset_ids);
4720 clear_bit(starget->id, ioa_cfg->target_ids);
4734 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4737 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4757 struct ipr_ioa_cfg *ioa_cfg;
4760 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4762 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4768 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4784 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4789 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4805 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4807 if (ioa_cfg->sis64)
4809 ipr_format_res_path(ioa_cfg,
4813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4831 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4838 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4852 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4857 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4898 * @ioa_cfg: ioa config struct
4905 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4919 for_each_hrrq(hrrq, ioa_cfg) {
4922 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4939 for_each_hrrq(hrrq, ioa_cfg) {
4942 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4954 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4967 struct ipr_ioa_cfg *ioa_cfg;
4972 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4973 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4975 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4976 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4977 dev_err(&ioa_cfg->pdev->dev,
4980 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4981 ioa_cfg->sdt_state = GET_DUMP;
4984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4985 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4986 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4990 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4995 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5002 * @ioa_cfg: ioa config struct
5013 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5022 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5026 if (ipr_cmd->ioa_cfg->sis64)
5054 struct ipr_ioa_cfg *ioa_cfg;
5059 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5067 if (ioa_cfg->in_reset_reload)
5069 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5075 rc = ipr_device_reset(ioa_cfg, res);
5086 struct ipr_ioa_cfg *ioa_cfg;
5089 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5100 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5116 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5120 if (!ioa_cfg->sis64)
5121 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5123 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5156 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5161 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5162 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5163 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5168 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5178 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5194 struct ipr_ioa_cfg *ioa_cfg;
5202 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5209 if (ioa_cfg->in_reset_reload ||
5210 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5220 readl(ioa_cfg->regs.sense_interrupt_reg);
5225 for_each_hrrq(hrrq, ioa_cfg) {
5228 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5229 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5241 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5281 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5285 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5287 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5304 struct ipr_ioa_cfg *ioa_cfg;
5308 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5315 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5322 * @ioa_cfg: ioa config struct
5328 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5334 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5341 if (ioa_cfg->sis64) {
5342 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5343 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5347 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5348 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5349 list_del(&ioa_cfg->reset_cmd->queue);
5350 timer_delete(&ioa_cfg->reset_cmd->timer);
5351 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5361 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5362 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5364 list_del(&ioa_cfg->reset_cmd->queue);
5365 timer_delete(&ioa_cfg->reset_cmd->timer);
5366 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5368 if (ioa_cfg->clear_isr) {
5370 dev_err(&ioa_cfg->pdev->dev,
5372 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5373 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5378 ioa_cfg->ioa_unit_checked = 1;
5380 dev_err(&ioa_cfg->pdev->dev,
5383 dev_err(&ioa_cfg->pdev->dev,
5386 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5387 ioa_cfg->sdt_state = GET_DUMP;
5389 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5390 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5398 * @ioa_cfg: ioa config struct
5405 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5407 ioa_cfg->errors_logged++;
5408 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5410 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5411 ioa_cfg->sdt_state = GET_DUMP;
5413 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5422 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5438 ipr_isr_eh(ioa_cfg,
5444 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5502 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5522 if (!ioa_cfg->clear_isr)
5529 ioa_cfg->regs.clr_interrupt_reg32);
5530 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5535 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5539 ipr_isr_eh(ioa_cfg,
5548 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5570 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5584 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5611 * @ioa_cfg: ioa config struct
5617 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5635 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5663 * @ioa_cfg: ioa config struct
5669 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5686 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5800 if (ipr_cmd->ioa_cfg->sis64)
5903 * @ioa_cfg: ioa config struct
5914 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5930 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5938 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5950 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5953 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5955 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6077 if (ipr_cmd->ioa_cfg->sis64)
6090 * @ioa_cfg: ioa config struct
6099 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6115 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6154 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6208 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6225 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6227 ipr_erp_start(ioa_cfg, ipr_cmd);
6229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6248 struct ipr_ioa_cfg *ioa_cfg;
6257 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6262 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6263 hrrq = &ioa_cfg->hrrq[hrrq_id];
6330 if (ioa_cfg->sis64)
6331 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6333 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6381 struct ipr_ioa_cfg *ioa_cfg;
6384 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6387 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6434 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6436 ioa_cfg->scsi_unblock = 1;
6437 schedule_work(&ioa_cfg->work_q);
6440 ioa_cfg->in_reset_reload = 0;
6441 ioa_cfg->reset_retries = 0;
6442 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6443 spin_lock(&ioa_cfg->hrrq[i]._lock);
6444 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6445 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6450 wake_up_all(&ioa_cfg->reset_wait_q);
6469 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6474 ioa_cfg->in_reset_reload = 0;
6475 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6476 spin_lock(&ioa_cfg->hrrq[j]._lock);
6477 ioa_cfg->hrrq[j].allow_cmds = 1;
6478 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6481 ioa_cfg->reset_cmd = NULL;
6482 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6484 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6490 schedule_work(&ioa_cfg->work_q);
6493 list_del_init(&ioa_cfg->hostrcb[j]->queue);
6495 ipr_send_hcam(ioa_cfg,
6497 ioa_cfg->hostrcb[j]);
6499 ipr_send_hcam(ioa_cfg,
6501 ioa_cfg->hostrcb[j]);
6504 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6505 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6507 ioa_cfg->reset_retries = 0;
6509 wake_up_all(&ioa_cfg->reset_wait_q);
6511 ioa_cfg->scsi_unblock = 1;
6512 schedule_work(&ioa_cfg->work_q);
6547 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6548 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6554 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6571 ioa_cfg->vpd_cbs_dma +
6579 if (!ioa_cfg->sis64)
6630 * @ioa_cfg: ioa config struct
6638 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6655 dev_err(&ioa_cfg->pdev->dev,
6666 * @ioa_cfg: ioa config struct
6675 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6681 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6682 ioa_cfg->bus_attr[i].bus_width);
6684 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6685 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6691 * @ioa_cfg: ioa config struct
6699 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6717 dev_err(&ioa_cfg->pdev->dev,
6723 bus_attr = &ioa_cfg->bus_attr[i];
6774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6775 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6779 ipr_scsi_bus_speed_limit(ioa_cfg);
6780 ipr_check_term_power(ioa_cfg, mode_pages);
6781 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6786 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6790 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6835 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6838 dev_err(&ioa_cfg->pdev->dev,
6842 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6859 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6864 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6884 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6888 0x28, ioa_cfg->vpd_cbs_dma +
6912 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6913 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6928 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6972 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6976 0x24, ioa_cfg->vpd_cbs_dma +
7003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7010 if (ioa_cfg->sis64)
7011 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7013 flag = ioa_cfg->u.cfg_table->hdr.flags;
7016 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7018 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7021 if (ioa_cfg->sis64)
7022 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7024 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7027 if (ioa_cfg->sis64)
7028 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7030 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7035 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7042 if (list_empty(&ioa_cfg->free_res_q)) {
7043 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7048 res = list_entry(ioa_cfg->free_res_q.next,
7050 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7064 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7070 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7073 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7094 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7096 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7097 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7101 ioa_cfg->dual_raid = 1;
7102 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7109 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7110 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7111 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7113 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7156 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7157 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7248 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7249 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7250 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7258 (ioa_cfg->vpd_cbs_dma
7281 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7282 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7283 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7291 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7312 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7319 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7338 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7344 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7346 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7351 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7369 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7375 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7394 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7400 if (ioa_cfg->identify_hrrq_index == 0)
7401 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7403 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7404 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7410 if (ioa_cfg->sis64)
7413 if (ioa_cfg->nvectors == 1)
7433 ioa_cfg->identify_hrrq_index;
7435 if (ioa_cfg->sis64) {
7448 ioa_cfg->identify_hrrq_index;
7453 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7480 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7483 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7485 if (ioa_cfg->reset_cmd == ipr_cmd) {
7490 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7521 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7522 * @ioa_cfg: ioa cfg struct
7527 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7531 for_each_hrrq(hrrq, ioa_cfg) {
7544 ioa_cfg->identify_hrrq_index = 0;
7545 if (ioa_cfg->hrrq_num == 1)
7546 atomic_set(&ioa_cfg->hrrq_index, 0);
7548 atomic_set(&ioa_cfg->hrrq_index, 1);
7551 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7566 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7569 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7584 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7585 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7586 stage_time = ioa_cfg->transop_timeout;
7589 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7594 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7595 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7622 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7629 ipr_init_ioa_mem(ioa_cfg);
7631 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7632 spin_lock(&ioa_cfg->hrrq[i]._lock);
7633 ioa_cfg->hrrq[i].allow_interrupts = 1;
7634 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7636 if (ioa_cfg->sis64) {
7638 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7639 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7642 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7646 ioa_cfg->regs.clr_interrupt_mask_reg32);
7647 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7652 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7654 if (ioa_cfg->sis64) {
7657 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7659 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7661 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7663 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7665 if (ioa_cfg->sis64) {
7670 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7692 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7694 if (ioa_cfg->sdt_state == GET_DUMP)
7695 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7696 else if (ioa_cfg->sdt_state == READ_DUMP)
7697 ioa_cfg->sdt_state = ABORT_DUMP;
7699 ioa_cfg->dump_timeout = 1;
7707 * @ioa_cfg: ioa config struct
7715 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7717 ioa_cfg->errors_logged++;
7718 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7723 * @ioa_cfg: ioa config struct
7731 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7739 mailbox = readl(ioa_cfg->ioa_mailbox);
7741 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7742 ipr_unit_check_no_data(ioa_cfg);
7747 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7753 ipr_unit_check_no_data(ioa_cfg);
7765 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7770 rc = ipr_get_ldump_data_section(ioa_cfg,
7776 ipr_handle_log_data(ioa_cfg, hostrcb);
7779 ioa_cfg->sdt_state == GET_DUMP)
7780 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7782 ipr_unit_check_no_data(ioa_cfg);
7784 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7798 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7801 ioa_cfg->ioa_unit_checked = 0;
7802 ipr_get_unit_check_buffer(ioa_cfg);
7812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7816 if (ioa_cfg->sdt_state != GET_DUMP)
7819 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
7820 (readl(ioa_cfg->regs.sense_interrupt_reg) &
7824 dev_err(&ioa_cfg->pdev->dev,
7827 ioa_cfg->sdt_state = READ_DUMP;
7828 ioa_cfg->dump_timeout = 0;
7829 if (ioa_cfg->sis64)
7834 schedule_work(&ioa_cfg->work_q);
7859 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7862 ioa_cfg->pdev->state_saved = true;
7863 pci_restore_state(ioa_cfg->pdev);
7865 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7870 ipr_fail_all_ops(ioa_cfg);
7872 if (ioa_cfg->sis64) {
7874 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7875 readl(ioa_cfg->regs.endian_swap_reg);
7878 if (ioa_cfg->ioa_unit_checked) {
7879 if (ioa_cfg->sis64) {
7884 ioa_cfg->ioa_unit_checked = 0;
7885 ipr_get_unit_check_buffer(ioa_cfg);
7892 if (ioa_cfg->in_ioa_bringdown) {
7894 } else if (ioa_cfg->sdt_state == GET_DUMP) {
7916 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7919 if (ioa_cfg->cfg_locked)
7920 pci_cfg_access_unlock(ioa_cfg->pdev);
7921 ioa_cfg->cfg_locked = 0;
7938 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7942 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7944 ioa_cfg->regs.set_uproc_interrupt_reg32);
7946 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7953 if (ioa_cfg->cfg_locked)
7954 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7955 ioa_cfg->cfg_locked = 0;
7992 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7993 struct pci_dev *pdev = ioa_cfg->pdev;
8001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8002 if (ioa_cfg->reset_cmd == ipr_cmd)
8004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8019 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8023 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8040 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8043 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8044 ioa_cfg->cfg_locked = 1;
8045 ipr_cmd->job_step = ioa_cfg->reset;
8053 ipr_cmd->job_step = ioa_cfg->reset;
8054 dev_err(&ioa_cfg->pdev->dev,
8073 ipr_cmd->ioa_cfg->cfg_locked = 0;
8081 * @ioa_cfg: ioa config struct
8086 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8090 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8111 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8114 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8139 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8144 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8147 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8148 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8172 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8176 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8202 for_each_hrrq(hrrq, ioa_cfg) {
8206 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8236 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8242 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8288 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8289 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8291 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8311 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8327 if (ioa_cfg->sis64)
8353 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8362 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8372 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8400 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8405 if (ioa_cfg->reset_cmd != ipr_cmd) {
8429 * @ioa_cfg: ioa config struct
8441 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8448 ioa_cfg->in_reset_reload = 1;
8449 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8450 spin_lock(&ioa_cfg->hrrq[i]._lock);
8451 ioa_cfg->hrrq[i].allow_cmds = 0;
8452 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8455 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8456 ioa_cfg->scsi_unblock = 0;
8457 ioa_cfg->scsi_blocked = 1;
8458 scsi_block_requests(ioa_cfg->host);
8461 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8462 ioa_cfg->reset_cmd = ipr_cmd;
8471 * @ioa_cfg: ioa config struct
8481 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8486 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8489 if (ioa_cfg->in_reset_reload) {
8490 if (ioa_cfg->sdt_state == GET_DUMP)
8491 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8492 else if (ioa_cfg->sdt_state == READ_DUMP)
8493 ioa_cfg->sdt_state = ABORT_DUMP;
8496 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8497 dev_err(&ioa_cfg->pdev->dev,
8500 ioa_cfg->reset_retries = 0;
8501 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8502 spin_lock(&ioa_cfg->hrrq[i]._lock);
8503 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8504 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8508 if (ioa_cfg->in_ioa_bringdown) {
8509 ioa_cfg->reset_cmd = NULL;
8510 ioa_cfg->in_reset_reload = 0;
8511 ipr_fail_all_ops(ioa_cfg);
8512 wake_up_all(&ioa_cfg->reset_wait_q);
8514 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8515 ioa_cfg->scsi_unblock = 1;
8516 schedule_work(&ioa_cfg->work_q);
8520 ioa_cfg->in_ioa_bringdown = 1;
8525 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8539 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8543 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8544 spin_lock(&ioa_cfg->hrrq[i]._lock);
8545 ioa_cfg->hrrq[i].allow_interrupts = 0;
8546 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8564 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8566 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8567 if (!ioa_cfg->probe_done)
8569 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8584 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8586 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8587 if (ioa_cfg->probe_done)
8588 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8589 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8603 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8605 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8606 if (ioa_cfg->probe_done) {
8607 if (ioa_cfg->needs_warm_reset)
8608 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8610 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8613 wake_up_all(&ioa_cfg->eeh_wait_q);
8614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8628 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8631 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8632 if (ioa_cfg->probe_done) {
8633 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8634 ioa_cfg->sdt_state = ABORT_DUMP;
8635 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8636 ioa_cfg->in_ioa_bringdown = 1;
8637 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8638 spin_lock(&ioa_cfg->hrrq[i]._lock);
8639 ioa_cfg->hrrq[i].allow_cmds = 0;
8640 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8645 wake_up_all(&ioa_cfg->eeh_wait_q);
8646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8677 * @ioa_cfg: ioa cfg struct
8685 static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8690 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8691 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8692 ioa_cfg->probe_done = 1;
8693 if (ioa_cfg->needs_hard_reset) {
8694 ioa_cfg->needs_hard_reset = 0;
8695 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8697 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8706 * @ioa_cfg: ioa config struct
8711 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8715 if (ioa_cfg->ipr_cmnd_list) {
8717 if (ioa_cfg->ipr_cmnd_list[i])
8718 dma_pool_free(ioa_cfg->ipr_cmd_pool,
8719 ioa_cfg->ipr_cmnd_list[i],
8720 ioa_cfg->ipr_cmnd_list_dma[i]);
8722 ioa_cfg->ipr_cmnd_list[i] = NULL;
8726 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8728 kfree(ioa_cfg->ipr_cmnd_list);
8729 kfree(ioa_cfg->ipr_cmnd_list_dma);
8730 ioa_cfg->ipr_cmnd_list = NULL;
8731 ioa_cfg->ipr_cmnd_list_dma = NULL;
8732 ioa_cfg->ipr_cmd_pool = NULL;
8737 * @ioa_cfg: ioa cfg struct
8742 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8746 kfree(ioa_cfg->res_entries);
8747 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8748 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8749 ipr_free_cmd_blks(ioa_cfg);
8751 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8752 dma_free_coherent(&ioa_cfg->pdev->dev,
8753 sizeof(u32) * ioa_cfg->hrrq[i].size,
8754 ioa_cfg->hrrq[i].host_rrq,
8755 ioa_cfg->hrrq[i].host_rrq_dma);
8757 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8758 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8761 dma_free_coherent(&ioa_cfg->pdev->dev,
8763 ioa_cfg->hostrcb[i],
8764 ioa_cfg->hostrcb_dma[i]);
8767 ipr_free_dump(ioa_cfg);
8768 kfree(ioa_cfg->trace);
8773 * @ioa_cfg: ipr cfg struct
8781 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
8783 struct pci_dev *pdev = ioa_cfg->pdev;
8786 for (i = 0; i < ioa_cfg->nvectors; i++)
8787 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
8793 * @ioa_cfg: ioa config struct
8801 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8803 struct pci_dev *pdev = ioa_cfg->pdev;
8806 ipr_free_irqs(ioa_cfg);
8807 if (ioa_cfg->reset_work_q)
8808 destroy_workqueue(ioa_cfg->reset_work_q);
8809 iounmap(ioa_cfg->hdw_dma_regs);
8811 ipr_free_mem(ioa_cfg);
8812 scsi_host_put(ioa_cfg->host);
8819 * @ioa_cfg: ioa config struct
8824 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8831 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8834 if (!ioa_cfg->ipr_cmd_pool)
8837 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8838 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8840 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8841 ipr_free_cmd_blks(ioa_cfg);
8845 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8846 if (ioa_cfg->hrrq_num > 1) {
8849 ioa_cfg->hrrq[i].min_cmd_id = 0;
8850 ioa_cfg->hrrq[i].max_cmd_id =
8855 (ioa_cfg->hrrq_num - 1);
8856 ioa_cfg->hrrq[i].min_cmd_id =
8859 ioa_cfg->hrrq[i].max_cmd_id =
8865 ioa_cfg->hrrq[i].min_cmd_id = 0;
8866 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8868 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8871 BUG_ON(ioa_cfg->hrrq_num == 0);
8874 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8876 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8877 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8881 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
8885 ipr_free_cmd_blks(ioa_cfg);
8889 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8890 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8894 if (ioa_cfg->sis64)
8900 if (ioa_cfg->sis64) {
8914 ipr_cmd->ioa_cfg = ioa_cfg;
8919 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8921 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
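ipr_alloc_cmd_blks() (8824-8921) partitions the command-block pool into per-queue [min_cmd_id, max_cmd_id] windows, with the last queue absorbing the division remainder (8874-8877). The reserve that queue 0 gets for internal commands is elided from the matches, so this sketch only models the even split plus remainder, with a hypothetical pool size:

    #define NUM_CMD_BLKS 100 /* hypothetical pool size */

    struct hrrq_window { int min_cmd_id, max_cmd_id, size; };

    /* Even split with the remainder folded into the last queue, the
     * shape visible at 8845-8877. The real driver also carves part of
     * queue 0 out for internal/error commands; that arithmetic is not
     * in the listing, so it is omitted here. */
    static void split_cmd_blks(struct hrrq_window *w, int hrrq_num)
    {
        int each = NUM_CMD_BLKS / hrrq_num;
        int rem = NUM_CMD_BLKS % hrrq_num;

        for (int i = 0; i < hrrq_num; i++) {
            w[i].min_cmd_id = i * each;
            w[i].max_cmd_id = w[i].min_cmd_id + each - 1;
            w[i].size = each;
        }
        w[hrrq_num - 1].size += rem;
        w[hrrq_num - 1].max_cmd_id += rem;
    }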
8930 * @ioa_cfg: ioa config struct
8935 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8937 struct pci_dev *pdev = ioa_cfg->pdev;
8941 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
8945 if (!ioa_cfg->res_entries)
8948 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8949 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8950 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8953 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
8955 &ioa_cfg->vpd_cbs_dma,
8958 if (!ioa_cfg->vpd_cbs)
8961 if (ipr_alloc_cmd_blks(ioa_cfg))
8964 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8965 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
8966 sizeof(u32) * ioa_cfg->hrrq[i].size,
8967 &ioa_cfg->hrrq[i].host_rrq_dma,
8970 if (!ioa_cfg->hrrq[i].host_rrq) {
8973 sizeof(u32) * ioa_cfg->hrrq[i].size,
8974 ioa_cfg->hrrq[i].host_rrq,
8975 ioa_cfg->hrrq[i].host_rrq_dma);
8978 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
8981 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
8982 ioa_cfg->cfg_table_size,
8983 &ioa_cfg->cfg_table_dma,
8986 if (!ioa_cfg->u.cfg_table)
8990 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
8992 &ioa_cfg->hostrcb_dma[i],
8995 if (!ioa_cfg->hostrcb[i])
8998 ioa_cfg->hostrcb[i]->hostrcb_dma =
8999 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9000 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9001 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9004 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9008 if (!ioa_cfg->trace)
9019 ioa_cfg->hostrcb[i],
9020 ioa_cfg->hostrcb_dma[i]);
9022 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9023 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9025 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9027 sizeof(u32) * ioa_cfg->hrrq[i].size,
9028 ioa_cfg->hrrq[i].host_rrq,
9029 ioa_cfg->hrrq[i].host_rrq_dma);
9032 ipr_free_cmd_blks(ioa_cfg);
9035 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9037 kfree(ioa_cfg->res_entries);
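
ipr_alloc_mem() is a textbook goto-unwind allocator: each dma_alloc_coherent()/kcalloc() failure jumps to a label that frees everything acquired so far, in reverse order, which is what the trailing cleanup lines above are. A standalone distillation of the pattern (generic names, not the driver's):

#include <stdlib.h>

static int alloc_three(void **a, void **b, void **c)
{
	*a = malloc(32);
	if (!*a)
		goto out;
	*b = malloc(64);
	if (!*b)
		goto out_free_a;
	*c = malloc(128);
	if (!*c)
		goto out_free_b;
	return 0;

out_free_b:            /* unwind strictly in reverse order */
	free(*b);
out_free_a:
	free(*a);
out:
	return -1;     /* the kernel version returns -ENOMEM */
}

int main(void)
{
	void *a, *b, *c;
	int rc = alloc_three(&a, &b, &c);

	if (!rc) {
		free(c);
		free(b);
		free(a);
	}
	return rc ? 1 : 0;
}
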
9043 * @ioa_cfg: ioa config struct
9048 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9053 ioa_cfg->bus_attr[i].bus = i;
9054 ioa_cfg->bus_attr[i].qas_enabled = 0;
9055 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9057 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9059 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9065 * @ioa_cfg: ioa config struct
9070 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9076 p = &ioa_cfg->chip_cfg->regs;
9077 t = &ioa_cfg->regs;
9078 base = ioa_cfg->hdw_dma_regs;
9097 if (ioa_cfg->sis64) {
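
ipr_init_regs() resolves every register pointer as "mapped BAR base + chip-specific offset", with the chip table (ioa_cfg->chip_cfg->regs) supplying the offsets; the sis64 branch additionally derives the 32-bit views of the 64-bit registers. A hedged sketch of the shape, using representative field names rather than the full register table, kernel context assumed:

struct example_reg_offsets { unsigned long set_interrupt_mask_reg; };
struct example_mapped_regs { void __iomem *set_interrupt_mask_reg; };

static void example_init_regs(struct example_mapped_regs *t,
			      const struct example_reg_offsets *p,
			      void __iomem *base)
{
	/* one assignment per register: virtual base + table offset */
	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
}
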
9107 * @ioa_cfg: ioa config struct
9114 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9119 ioa_cfg->host = host;
9120 ioa_cfg->pdev = pdev;
9121 ioa_cfg->log_level = ipr_log_level;
9122 ioa_cfg->doorbell = IPR_DOORBELL;
9123 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9124 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9125 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9126 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9127 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9128 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9130 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9131 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9132 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9133 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9134 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9135 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9136 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9137 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9138 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9139 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9140 ioa_cfg->sdt_state = INACTIVE;
9142 ipr_initialize_bus_attr(ioa_cfg);
9143 ioa_cfg->max_devs_supported = ipr_max_devs;
9145 if (ioa_cfg->sis64) {
9150 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9151 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9153 * ioa_cfg->max_devs_supported)));
9159 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9160 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9162 * ioa_cfg->max_devs_supported)));
9167 host->can_queue = ioa_cfg->max_cmds;
9168 pci_set_drvdata(pdev, ioa_cfg);
9170 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9171 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9172 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9173 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9175 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9177 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
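
The per-HRRQ setup loop at the end of ipr_init_ioa_cfg() chooses each queue's lock: queue 0 aliases the Scsi_Host lock, while the remaining queues point at their own embedded spinlock so multi-vector completion paths avoid contending on host_lock. Sketch of the loop as it would appear inside the function, with the branch the listing elides between the two lock assignments filled in; kernel context assumed:

for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
	INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
	spin_lock_init(&ioa_cfg->hrrq[i]._lock);
	if (i == 0)
		ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
	else
		ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
}
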
9203 * @ioa_cfg: ioa config struct
9208 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9210 struct pci_dev *pdev = ioa_cfg->pdev;
9213 wait_event_timeout(ioa_cfg->eeh_wait_q,
9220 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9222 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9224 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9225 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9226 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9227 ioa_cfg->vectors_info[vec_idx].
9228 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
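
name_msi_vectors() labels each vector "host%d-%d" in a fixed-size desc buffer. snprintf() already NUL-terminates within the size it is given, so the explicit terminating store that follows is belt-and-braces. Standalone demo of the same formatting:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char desc[4][32];
	int host_no = 2, v;

	for (v = 0; v < 4; v++) {
		snprintf(desc[v], sizeof(desc[v]) - 1,
			 "host%d-%d", host_no, v);
		desc[v][strlen(desc[v])] = 0;  /* mirrors the driver's explicit NUL */
		puts(desc[v]);
	}
	return 0;
}
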
9232 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
9237 for (i = 1; i < ioa_cfg->nvectors; i++) {
9241 ioa_cfg->vectors_info[i].desc,
9242 &ioa_cfg->hrrq[i]);
9246 &ioa_cfg->hrrq[i]);
9266 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9269 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9272 ioa_cfg->msi_received = 1;
9273 wake_up(&ioa_cfg->msi_wait_q);
9275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9281 * @ioa_cfg: ioa config struct
9291 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9300 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9301 ioa_cfg->msi_received = 0;
9302 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9303 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9304 readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9307 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9314 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9315 readl(ioa_cfg->regs.sense_interrupt_reg);
9316 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9317 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9318 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9320 if (!ioa_cfg->msi_received) {
9327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9329 free_irq(irq, ioa_cfg);
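
ipr_test_msi() is a self-test idiom worth naming: arm a flag plus waitqueue, ask the adapter to raise an interrupt it is guaranteed to generate (the IO debug acknowledge doorbell), sleep with a timeout, and trust the flag rather than the hardware. A trimmed sketch without the interrupt masking and locking shown above; kernel context is assumed, and the -EOPNOTSUPP fallback code is this editor's reading of the failure path:

static int example_test_one_irq(struct ipr_ioa_cfg *ioa_cfg, int irq)
{
	int rc;

	ioa_cfg->msi_received = 0;
	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc)
		return rc;

	/* fire the debug-acknowledge interrupt, then flush the posted write */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.sense_interrupt_reg32);
	readl(ioa_cfg->regs.sense_interrupt_reg);

	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	free_irq(irq, ioa_cfg);
	return ioa_cfg->msi_received ? 0 : -EOPNOTSUPP;
}
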
9346 struct ipr_ioa_cfg *ioa_cfg;
9358 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9366 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9367 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9369 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9371 if (!ioa_cfg->ipr_chip) {
9378 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9379 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9380 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9381 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9384 ioa_cfg->transop_timeout = ipr_transop_timeout;
9386 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9388 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9390 ioa_cfg->revid = pdev->revision;
9392 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9407 ipr_wait_for_pci_err_recovery(ioa_cfg);
9413 ipr_wait_for_pci_err_recovery(ioa_cfg);
9427 ioa_cfg->hdw_dma_regs = ipr_regs;
9428 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9429 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9431 ipr_init_regs(ioa_cfg);
9433 if (ioa_cfg->sis64) {
9449 ioa_cfg->chip_cfg->cache_line_size);
9453 ipr_wait_for_pci_err_recovery(ioa_cfg);
9459 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9460 ipr_wait_for_pci_err_recovery(ioa_cfg);
9469 if (ioa_cfg->ipr_chip->has_msi)
9473 ipr_wait_for_pci_err_recovery(ioa_cfg);
9476 ioa_cfg->nvectors = rc;
9479 ioa_cfg->clear_isr = 1;
9484 ipr_wait_for_pci_err_recovery(ioa_cfg);
9493 rc = ipr_test_msi(ioa_cfg, pdev);
9497 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
9501 ipr_wait_for_pci_err_recovery(ioa_cfg);
9504 ioa_cfg->nvectors = 1;
9505 ioa_cfg->clear_isr = 1;
9512 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9516 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9519 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9522 rc = ipr_alloc_mem(ioa_cfg);
9542 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9543 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9544 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9546 ioa_cfg->needs_hard_reset = 1;
9548 ioa_cfg->needs_hard_reset = 1;
9550 ioa_cfg->ioa_unit_checked = 1;
9552 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9553 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9557 name_msi_vectors(ioa_cfg);
9559 ioa_cfg->vectors_info[0].desc,
9560 &ioa_cfg->hrrq[0]);
9562 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
9566 IPR_NAME, &ioa_cfg->hrrq[0]);
9575 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9576 ioa_cfg->needs_warm_reset = 1;
9577 ioa_cfg->reset = ipr_reset_slot_reset;
9579 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
9582 if (!ioa_cfg->reset_work_q) {
9588 ioa_cfg->reset = ipr_reset_start_bist;
9591 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9599 ipr_free_irqs(ioa_cfg);
9601 ipr_free_mem(ioa_cfg);
9603 ipr_wait_for_pci_err_recovery(ioa_cfg);
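
The probe path above degrades gracefully on interrupt setup: request up to the chip's vector budget as MSI-X/MSI, verify delivery with ipr_test_msi(), and on any failure fall back to a single vector with legacy-style interrupt clearing (clear_isr = 1, nvectors = 1). Hedged sketch of that ladder; the flag combination and the max_vecs parameter are assumptions, and the driver's intermediate error handling is trimmed:

static int example_setup_vectors(struct ipr_ioa_cfg *ioa_cfg,
				 struct pci_dev *pdev, int max_vecs)
{
	int rc = pci_alloc_irq_vectors(pdev, 1, max_vecs,
				       PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (rc < 0)
		rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
	if (rc < 0)
		return rc;

	ioa_cfg->nvectors = rc;
	if (rc == 1)
		ioa_cfg->clear_isr = 1;   /* single-vector/legacy mode */
	return 0;
}
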
9618 * @ioa_cfg: ioa config struct
9630 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9634 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9635 ioa_cfg->sdt_state = ABORT_DUMP;
9636 ioa_cfg->reset_retries = 0;
9637 ioa_cfg->in_ioa_bringdown = 1;
9638 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
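
Reconstructed from the lines above, ipr_initiate_ioa_bringdown() reads as: abort any dump still being waited on, clear the retry budget, flag bringdown so the reset job knows not to bring the adapter back, and reuse the ordinary reset machinery. Sketch, kernel context assumed:

static void example_bringdown(struct ipr_ioa_cfg *ioa_cfg,
			      enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;  /* don't stall on a dump */
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}
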
9654 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9659 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9660 while (ioa_cfg->in_reset_reload) {
9661 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9662 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9663 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9666 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9667 spin_lock(&ioa_cfg->hrrq[i]._lock);
9668 ioa_cfg->hrrq[i].removing_ioa = 1;
9669 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9672 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9674 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9675 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9676 flush_work(&ioa_cfg->work_q);
9677 if (ioa_cfg->reset_work_q)
9678 flush_workqueue(ioa_cfg->reset_work_q);
9679 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9680 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9683 list_del(&ioa_cfg->queue);
9686 if (ioa_cfg->sdt_state == ABORT_DUMP)
9687 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9690 ipr_free_all_resources(ioa_cfg);
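
The remove path is quiesce-then-teardown: wait out any in-flight reset/reload (dropping host_lock while sleeping, retaking it to re-check), mark every HRRQ as removing so new commands are refused, bring the IOA down, then flush deferred work before ipr_free_all_resources() runs. Sketch of the quiesce half, kernel context assumed:

static void example_quiesce(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		/* never sleep holding the lock; re-check after waking */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;  /* refuse new commands */
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
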
9706 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9710 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9712 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9714 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9716 scsi_remove_host(ioa_cfg->host);
9733 struct ipr_ioa_cfg *ioa_cfg;
9742 ioa_cfg = pci_get_drvdata(pdev);
9743 ipr_probe_ioa_part2(ioa_cfg);
9745 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9752 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9756 scsi_remove_host(ioa_cfg->host);
9761 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
9765 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9767 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9769 scsi_remove_host(ioa_cfg->host);
9774 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9778 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9780 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9782 scsi_remove_host(ioa_cfg->host);
9786 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9787 ioa_cfg->scan_enabled = 1;
9788 schedule_work(&ioa_cfg->work_q);
9789 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9791 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9793 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9794 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9795 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
9796 ioa_cfg->iopoll_weight, ipr_iopoll);
9800 scsi_scan_host(ioa_cfg->host);
9817 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9823 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9824 ioa_cfg->iopoll_weight = 0;
9825 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9826 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
9829 while (ioa_cfg->in_reset_reload) {
9830 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9831 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9835 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
9838 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
9839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9840 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9841 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
9842 ipr_free_irqs(ioa_cfg);
9843 pci_disable_device(ioa_cfg->pdev);
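
ipr_shutdown() picks the shutdown flavor at the last moment: with the ipr_fast_reboot module option set, on a SIS64 adapter during a system restart, it substitutes an abbreviated shutdown type (elided by the listing) and afterwards releases IRQs and disables the PCI device. Minimal sketch of the decision as it would appear inside the function; IPR_SHUTDOWN_QUIESCE is this editor's assumption for the abbreviated type:

enum ipr_shutdown_type type = IPR_SHUTDOWN_NORMAL;

if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
	type = IPR_SHUTDOWN_QUIESCE;  /* assumed abbreviated shutdown */
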
10000 struct ipr_ioa_cfg *ioa_cfg;
10008 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10009 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10010 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10011 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10012 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10016 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);