Lines Matching +full:sml +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0
3 * driver for Microchip PQI-based storage controllers
4 * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
5 * Copyright (c) 2016-2018 Microsemi Corporation
6 * Copyright (c) 2016 PMC-Sierra, Inc.
22 #include <linux/blk-mq-pci.h>
36 #define DRIVER_VERSION "2.1.26-030"
51 #define PQI_NO_COMPLETION ((void *)-1)
194 "RAID-0",
195 "RAID-4",
196 "RAID-1(1+0)",
197 "RAID-5",
198 "RAID-5+1",
199 "RAID-6",
200 "RAID-1(Triple)",
229 sdev->no_write_same = 1; in pqi_disable_write_same()
239 return !device->is_physical_device; in pqi_is_logical_device()
249 return !ctrl_info->controller_online; in pqi_ctrl_offline()
254 if (ctrl_info->controller_online) in pqi_check_ctrl_health()
308 ctrl_info->scan_blocked = true; in pqi_ctrl_block_scan()
309 mutex_lock(&ctrl_info->scan_mutex); in pqi_ctrl_block_scan()
314 ctrl_info->scan_blocked = false; in pqi_ctrl_unblock_scan()
315 mutex_unlock(&ctrl_info->scan_mutex); in pqi_ctrl_unblock_scan()
320 return ctrl_info->scan_blocked; in pqi_ctrl_scan_blocked()
325 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_block_device_reset()
330 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_unblock_device_reset()
339 shost = ctrl_info->scsi_host; in pqi_scsi_block_requests()
355 scsi_unblock_requests(ctrl_info->scsi_host); in pqi_scsi_unblock_requests()
360 atomic_inc(&ctrl_info->num_busy_threads); in pqi_ctrl_busy()
365 atomic_dec(&ctrl_info->num_busy_threads); in pqi_ctrl_unbusy()
370 return ctrl_info->block_requests; in pqi_ctrl_blocked()
375 ctrl_info->block_requests = true; in pqi_ctrl_block_requests()
380 ctrl_info->block_requests = false; in pqi_ctrl_unblock_requests()
381 wake_up_all(&ctrl_info->block_requests_wait); in pqi_ctrl_unblock_requests()
389 atomic_inc(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
390 wait_event(ctrl_info->block_requests_wait, in pqi_wait_if_ctrl_blocked()
392 atomic_dec(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
407 while (atomic_read(&ctrl_info->num_busy_threads) > in pqi_ctrl_wait_until_quiesced()
408 atomic_read(&ctrl_info->num_blocked_threads)) { in pqi_ctrl_wait_until_quiesced()
410 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
412 jiffies_to_msecs(jiffies - start_jiffies) / 1000); in pqi_ctrl_wait_until_quiesced()
420 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
422 jiffies_to_msecs(jiffies - start_jiffies) / 1000); in pqi_ctrl_wait_until_quiesced()
427 return device->device_offline; in pqi_device_offline()
432 mutex_lock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_start()
437 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_done()
442 mutex_lock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
443 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
448 return mutex_is_locked(&ctrl_info->ofa_mutex); in pqi_ofa_in_progress()
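
The three OFA helpers above use the mutex itself as the completion signal: pqi_ctrl_ofa_start() holds ofa_mutex for the duration of OFA, so pqi_wait_until_ofa_finished() can wait simply by taking and immediately dropping it. A minimal user-space pthread sketch of that idiom (illustrative only, not driver code):

#include <pthread.h>

static pthread_mutex_t ofa_mutex = PTHREAD_MUTEX_INITIALIZER;

static void ofa_start(void) { pthread_mutex_lock(&ofa_mutex); }
static void ofa_done(void)  { pthread_mutex_unlock(&ofa_mutex); }

/* Blocks until any in-progress OFA drops the mutex. */
static void wait_until_ofa_finished(void)
{
	pthread_mutex_lock(&ofa_mutex);
	pthread_mutex_unlock(&ofa_mutex);
}
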
453 device->in_remove = true; in pqi_device_remove_start()
458 return device->in_remove; in pqi_device_in_remove()
463 device->in_reset[lun] = true; in pqi_device_reset_start()
468 device->in_reset[lun] = false; in pqi_device_reset_done()
473 return device->in_reset[lun]; in pqi_device_in_reset()
484 return -1; in pqi_event_type_to_event_index()
489 return pqi_event_type_to_event_index(event_type) != -1; in pqi_is_supported_event()
498 schedule_delayed_work(&ctrl_info->rescan_work, delay); in pqi_schedule_rescan_worker_with_delay()
515 cancel_delayed_work_sync(&ctrl_info->rescan_work); in pqi_cancel_rescan_worker()
520 if (!ctrl_info->heartbeat_counter) in pqi_read_heartbeat_counter()
523 return readl(ctrl_info->heartbeat_counter); in pqi_read_heartbeat_counter()
528 return readb(ctrl_info->soft_reset_status); in pqi_read_soft_reset_status()
537 writeb(status, ctrl_info->soft_reset_status); in pqi_clear_soft_reset_status()
547 if (device->ncq_prio_enable) { in pqi_is_io_high_priority()
552 switch (scmd->cmnd[0]) { in pqi_is_io_high_priority()
579 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, in pqi_map_single()
581 if (dma_mapping_error(&pci_dev->dev, bus_address)) in pqi_map_single()
582 return -ENOMEM; in pqi_map_single()
584 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); in pqi_map_single()
585 put_unaligned_le32(buffer_length, &sg_descriptor->length); in pqi_map_single()
586 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); in pqi_map_single()
601 dma_unmap_single(&pci_dev->dev, in pqi_pci_unmap()
617 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; in pqi_build_raid_path_request()
619 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, in pqi_build_raid_path_request()
620 &request->header.iu_length); in pqi_build_raid_path_request()
621 put_unaligned_le32(buffer_length, &request->buffer_length); in pqi_build_raid_path_request()
622 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); in pqi_build_raid_path_request()
623 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_build_raid_path_request()
624 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; in pqi_build_raid_path_request()
626 cdb = request->cdb; in pqi_build_raid_path_request()
630 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
640 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
643 if (ctrl_info->rpl_extended_format_4_5_supported) in pqi_build_raid_path_request()
648 cdb[1] = ctrl_info->ciss_report_log_flags; in pqi_build_raid_path_request()
653 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
659 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; in pqi_build_raid_path_request()
660 request->data_direction = SOP_WRITE_FLAG; in pqi_build_raid_path_request()
672 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
681 request->data_direction = SOP_WRITE_FLAG; in pqi_build_raid_path_request()
687 request->data_direction = SOP_BIDIRECTIONAL; in pqi_build_raid_path_request()
694 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); in pqi_build_raid_path_request()
698 switch (request->data_direction) { in pqi_build_raid_path_request()
713 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], in pqi_build_raid_path_request()
719 io_request->scmd = NULL; in pqi_reinit_io_request()
720 io_request->status = 0; in pqi_reinit_io_request()
721 io_request->error_info = NULL; in pqi_reinit_io_request()
722 io_request->raid_bypass = false; in pqi_reinit_io_request()
730 if (scmd) { /* SML I/O request */ in pqi_alloc_io_request()
734 io_request = &ctrl_info->io_request_pool[i]; in pqi_alloc_io_request()
735 if (atomic_inc_return(&io_request->refcount) > 1) { in pqi_alloc_io_request()
736 atomic_dec(&io_request->refcount); in pqi_alloc_io_request()
741 * benignly racy - may have to wait for an open slot. in pqi_alloc_io_request()
742 * command slot range is scsi_ml_can_queue - in pqi_alloc_io_request()
743 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] in pqi_alloc_io_request()
747 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; in pqi_alloc_io_request()
748 if (atomic_inc_return(&io_request->refcount) == 1) in pqi_alloc_io_request()
750 atomic_dec(&io_request->refcount); in pqi_alloc_io_request()
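
Both loops above claim an io_request slot only when the caller's increment takes the refcount from 0 to 1; any other result means the slot is busy, so the increment is rolled back and the next slot is tried. A minimal user-space sketch of that claim-by-refcount pattern (illustrative only, not driver code; names are hypothetical):

#include <stdatomic.h>
#include <stddef.h>

struct slot {
	atomic_int refcount;	/* 0 = free, > 0 = claimed */
};

/* Try each of 'count' slots once; return the claimed slot or NULL. */
static struct slot *claim_slot(struct slot *pool, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		/* fetch_add returns the old value; 0 -> 1 means we own it */
		if (atomic_fetch_add(&pool[i].refcount, 1) == 0)
			return &pool[i];
		atomic_fetch_sub(&pool[i].refcount, 1);	/* lost the race */
	}
	return NULL;	/* benignly racy: caller retries or waits */
}
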
763 atomic_dec(&io_request->refcount); in pqi_free_io_request()
781 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_send_scsi_raid_request()
840 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); in pqi_identify_physical_device()
846 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_identify_physical_device()
879 max_write_raid_1_10_3drive) - \
891 return -ENOMEM; in pqi_get_advanced_raid_bypass_config()
903 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_get_advanced_raid_bypass_config()
908 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || in pqi_get_advanced_raid_bypass_config()
909 buffer->header.subpage_code != in pqi_get_advanced_raid_bypass_config()
911 get_unaligned_le16(&buffer->header.buffer_length) < in pqi_get_advanced_raid_bypass_config()
913 buffer->aio_subpage.header.page_code != in pqi_get_advanced_raid_bypass_config()
915 buffer->aio_subpage.header.subpage_code != in pqi_get_advanced_raid_bypass_config()
917 get_unaligned_le16(&buffer->aio_subpage.header.page_length) < in pqi_get_advanced_raid_bypass_config()
922 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_get_advanced_raid_bypass_config()
924 &buffer->aio_subpage.max_transfer_encrypted_sas_sata); in pqi_get_advanced_raid_bypass_config()
926 ctrl_info->max_transfer_encrypted_nvme = in pqi_get_advanced_raid_bypass_config()
928 &buffer->aio_subpage.max_transfer_encrypted_nvme); in pqi_get_advanced_raid_bypass_config()
930 ctrl_info->max_write_raid_5_6 = in pqi_get_advanced_raid_bypass_config()
932 &buffer->aio_subpage.max_write_raid_5_6); in pqi_get_advanced_raid_bypass_config()
934 ctrl_info->max_write_raid_1_10_2drive = in pqi_get_advanced_raid_bypass_config()
936 &buffer->aio_subpage.max_write_raid_1_10_2drive); in pqi_get_advanced_raid_bypass_config()
938 ctrl_info->max_write_raid_1_10_3drive = in pqi_get_advanced_raid_bypass_config()
940 &buffer->aio_subpage.max_write_raid_1_10_3drive); in pqi_get_advanced_raid_bypass_config()
956 return -ENOMEM; in pqi_flush_cache()
958 flush_cache->shutdown_event = shutdown_event; in pqi_flush_cache()
985 return -ENOMEM; in pqi_set_diag_rescan()
992 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); in pqi_set_diag_rescan()
1034 return -ENOMEM; in pqi_write_driver_version_to_host_wellness()
1036 buffer->start_tag[0] = '<'; in pqi_write_driver_version_to_host_wellness()
1037 buffer->start_tag[1] = 'H'; in pqi_write_driver_version_to_host_wellness()
1038 buffer->start_tag[2] = 'W'; in pqi_write_driver_version_to_host_wellness()
1039 buffer->start_tag[3] = '>'; in pqi_write_driver_version_to_host_wellness()
1040 buffer->driver_version_tag[0] = 'D'; in pqi_write_driver_version_to_host_wellness()
1041 buffer->driver_version_tag[1] = 'V'; in pqi_write_driver_version_to_host_wellness()
1042 put_unaligned_le16(sizeof(buffer->driver_version), in pqi_write_driver_version_to_host_wellness()
1043 &buffer->driver_version_length); in pqi_write_driver_version_to_host_wellness()
1044 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, in pqi_write_driver_version_to_host_wellness()
1045 sizeof(buffer->driver_version) - 1); in pqi_write_driver_version_to_host_wellness()
1046 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; in pqi_write_driver_version_to_host_wellness()
1047 buffer->dont_write_tag[0] = 'D'; in pqi_write_driver_version_to_host_wellness()
1048 buffer->dont_write_tag[1] = 'W'; in pqi_write_driver_version_to_host_wellness()
1049 buffer->end_tag[0] = 'Z'; in pqi_write_driver_version_to_host_wellness()
1050 buffer->end_tag[1] = 'Z'; in pqi_write_driver_version_to_host_wellness()
1086 return -ENOMEM; in pqi_write_current_time_to_host_wellness()
1088 buffer->start_tag[0] = '<'; in pqi_write_current_time_to_host_wellness()
1089 buffer->start_tag[1] = 'H'; in pqi_write_current_time_to_host_wellness()
1090 buffer->start_tag[2] = 'W'; in pqi_write_current_time_to_host_wellness()
1091 buffer->start_tag[3] = '>'; in pqi_write_current_time_to_host_wellness()
1092 buffer->time_tag[0] = 'T'; in pqi_write_current_time_to_host_wellness()
1093 buffer->time_tag[1] = 'D'; in pqi_write_current_time_to_host_wellness()
1094 put_unaligned_le16(sizeof(buffer->time), in pqi_write_current_time_to_host_wellness()
1095 &buffer->time_length); in pqi_write_current_time_to_host_wellness()
1098 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); in pqi_write_current_time_to_host_wellness()
1101 buffer->time[0] = bin2bcd(tm.tm_hour); in pqi_write_current_time_to_host_wellness()
1102 buffer->time[1] = bin2bcd(tm.tm_min); in pqi_write_current_time_to_host_wellness()
1103 buffer->time[2] = bin2bcd(tm.tm_sec); in pqi_write_current_time_to_host_wellness()
1104 buffer->time[3] = 0; in pqi_write_current_time_to_host_wellness()
1105 buffer->time[4] = bin2bcd(tm.tm_mon + 1); in pqi_write_current_time_to_host_wellness()
1106 buffer->time[5] = bin2bcd(tm.tm_mday); in pqi_write_current_time_to_host_wellness()
1107 buffer->time[6] = bin2bcd(year / 100); in pqi_write_current_time_to_host_wellness()
1108 buffer->time[7] = bin2bcd(year % 100); in pqi_write_current_time_to_host_wellness()
1110 buffer->dont_write_tag[0] = 'D'; in pqi_write_current_time_to_host_wellness()
1111 buffer->dont_write_tag[1] = 'W'; in pqi_write_current_time_to_host_wellness()
1112 buffer->end_tag[0] = 'Z'; in pqi_write_current_time_to_host_wellness()
1113 buffer->end_tag[1] = 'Z'; in pqi_write_current_time_to_host_wellness()
1134 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_time_worker()
1137 schedule_delayed_work(&ctrl_info->update_time_work, in pqi_update_time_worker()
1143 schedule_delayed_work(&ctrl_info->update_time_work, 0); in pqi_schedule_update_time_worker()
1148 cancel_delayed_work_sync(&ctrl_info->update_time_work); in pqi_cancel_update_time_worker()
1168 rc = -ENOMEM; in pqi_report_phys_logical_luns()
1176 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); in pqi_report_phys_logical_luns()
1183 rc = -ENOMEM; in pqi_report_phys_logical_luns()
1197 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); in pqi_report_phys_logical_luns()
1233 if (ctrl_info->rpl_extended_format_4_5_supported) { in pqi_report_phys_luns()
1235 rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; in pqi_report_phys_luns()
1240 dev_err(&ctrl_info->pci_dev->dev, in pqi_report_phys_luns()
1243 return -EINVAL; in pqi_report_phys_luns()
1245 dev_warn(&ctrl_info->pci_dev->dev, in pqi_report_phys_luns()
1251 …physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_l… in pqi_report_phys_luns()
1256 return -ENOMEM; in pqi_report_phys_luns()
1259 &rpl_16byte_wwid_list->header.list_length); in pqi_report_phys_luns()
1260 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; in pqi_report_phys_luns()
1263 …memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, si… in pqi_report_phys_luns()
1264 …memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, s… in pqi_report_phys_luns()
1265 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); in pqi_report_phys_luns()
1266 …rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; in pqi_report_phys_luns()
1267 …rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_fla… in pqi_report_phys_luns()
1268 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; in pqi_report_phys_luns()
1269 …rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redunda… in pqi_report_phys_luns()
1270 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; in pqi_report_phys_luns()
1297 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1302 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1314 get_unaligned_be32(&logdev_data->header.list_length); in pqi_get_device_lists()
1330 return -ENOMEM; in pqi_get_device_lists()
1338 &internal_logdev_list->header.list_length); in pqi_get_device_lists()
1349 device->bus = bus; in pqi_set_bus_target_lun()
1350 device->target = target; in pqi_set_bus_target_lun()
1351 device->lun = lun; in pqi_set_bus_target_lun()
1362 scsi3addr = device->scsi3addr; in pqi_assign_bus_target_lun()
1368 device->target_lun_valid = true; in pqi_assign_bus_target_lun()
1373 if (device->is_external_raid_device) { in pqi_assign_bus_target_lun()
1383 device->target_lun_valid = true; in pqi_assign_bus_target_lun()
1388 * Defer target and LUN assignment for non-controller physical devices in pqi_assign_bus_target_lun()
1405 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_level()
1415 device->raid_level = raid_level; in pqi_get_raid_level()
1425 raid_map_size = get_unaligned_le32(&raid_map->structure_size); in pqi_validate_raid_map()
1432 if (device->raid_level == SA_RAID_1) { in pqi_validate_raid_map()
1433 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { in pqi_validate_raid_map()
1434 err_msg = "invalid RAID-1 map"; in pqi_validate_raid_map()
1437 } else if (device->raid_level == SA_RAID_TRIPLE) { in pqi_validate_raid_map()
1438 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { in pqi_validate_raid_map()
1439 err_msg = "invalid RAID-1(Triple) map"; in pqi_validate_raid_map()
1442 } else if ((device->raid_level == SA_RAID_5 || in pqi_validate_raid_map()
1443 device->raid_level == SA_RAID_6) && in pqi_validate_raid_map()
1444 get_unaligned_le16(&raid_map->layout_map_count) > 1) { in pqi_validate_raid_map()
1447 get_unaligned_le16(&raid_map->strip_size) * in pqi_validate_raid_map()
1448 get_unaligned_le16(&raid_map->data_disks_per_row); in pqi_validate_raid_map()
1450 err_msg = "invalid RAID-5 or RAID-6 map"; in pqi_validate_raid_map()
1458 dev_warn(&ctrl_info->pci_dev->dev, in pqi_validate_raid_map()
1460 *((u32 *)&device->scsi3addr), in pqi_validate_raid_map()
1461 *((u32 *)&device->scsi3addr[4]), err_msg); in pqi_validate_raid_map()
1463 return -EINVAL; in pqi_validate_raid_map()
1475 return -ENOMEM; in pqi_get_raid_map()
1478 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); in pqi_get_raid_map()
1482 raid_map_size = get_unaligned_le32(&raid_map->structure_size); in pqi_get_raid_map()
1490 return -ENOMEM; in pqi_get_raid_map()
1493 device->scsi3addr, raid_map, raid_map_size, 0, NULL); in pqi_get_raid_map()
1497 if (get_unaligned_le32(&raid_map->structure_size) in pqi_get_raid_map()
1499 dev_warn(&ctrl_info->pci_dev->dev, in pqi_get_raid_map()
1502 get_unaligned_le32(&raid_map->structure_size)); in pqi_get_raid_map()
1503 rc = -EINVAL; in pqi_get_raid_map()
1512 device->raid_map = raid_map; in pqi_get_raid_map()
1525 if (!ctrl_info->lv_drive_type_mix_valid) { in pqi_set_max_transfer_encrypted()
1526 device->max_transfer_encrypted = ~0; in pqi_set_max_transfer_encrypted()
1530 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { in pqi_set_max_transfer_encrypted()
1538 device->max_transfer_encrypted = in pqi_set_max_transfer_encrypted()
1539 ctrl_info->max_transfer_encrypted_sas_sata; in pqi_set_max_transfer_encrypted()
1542 device->max_transfer_encrypted = in pqi_set_max_transfer_encrypted()
1543 ctrl_info->max_transfer_encrypted_nvme; in pqi_set_max_transfer_encrypted()
1548 device->max_transfer_encrypted = in pqi_set_max_transfer_encrypted()
1549 min(ctrl_info->max_transfer_encrypted_sas_sata, in pqi_set_max_transfer_encrypted()
1550 ctrl_info->max_transfer_encrypted_nvme); in pqi_set_max_transfer_encrypted()
1566 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_bypass_status()
1576 device->raid_bypass_configured = in pqi_get_raid_bypass_status()
1578 if (device->raid_bypass_configured && in pqi_get_raid_bypass_status()
1581 device->raid_bypass_enabled = true; in pqi_get_raid_bypass_status()
1582 if (get_unaligned_le16(&device->raid_map->flags) & in pqi_get_raid_bypass_status()
1592 * Use vendor-specific VPD to determine online/offline status of a volume.
1609 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_volume_status()
1614 if (vpd->page_code != CISS_VPD_LV_STATUS) in pqi_get_volume_status()
1618 volume_status) + vpd->page_length; in pqi_get_volume_status()
1622 volume_status = vpd->volume_status; in pqi_get_volume_status()
1623 volume_flags = get_unaligned_be32(&vpd->flags); in pqi_get_volume_status()
1629 device->volume_status = volume_status; in pqi_get_volume_status()
1630 device->volume_offline = volume_offline; in pqi_get_volume_status()
1648 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; in pqi_get_physical_device_info()
1652 scsi_sanitize_inquiry_string(&id_phys->model[0], 8); in pqi_get_physical_device_info()
1653 scsi_sanitize_inquiry_string(&id_phys->model[8], 16); in pqi_get_physical_device_info()
1655 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); in pqi_get_physical_device_info()
1656 memcpy(device->model, &id_phys->model[8], sizeof(device->model)); in pqi_get_physical_device_info()
1658 device->box_index = id_phys->box_index; in pqi_get_physical_device_info()
1659 device->phys_box_on_bus = id_phys->phys_box_on_bus; in pqi_get_physical_device_info()
1660 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; in pqi_get_physical_device_info()
1661 device->queue_depth = in pqi_get_physical_device_info()
1662 get_unaligned_le16(&id_phys->current_queue_depth_limit); in pqi_get_physical_device_info()
1663 device->active_path_index = id_phys->active_path_number; in pqi_get_physical_device_info()
1664 device->path_map = id_phys->redundant_path_present_map; in pqi_get_physical_device_info()
1665 memcpy(&device->box, in pqi_get_physical_device_info()
1666 &id_phys->alternate_paths_phys_box_on_port, in pqi_get_physical_device_info()
1667 sizeof(device->box)); in pqi_get_physical_device_info()
1668 memcpy(&device->phys_connector, in pqi_get_physical_device_info()
1669 &id_phys->alternate_paths_phys_connector, in pqi_get_physical_device_info()
1670 sizeof(device->phys_connector)); in pqi_get_physical_device_info()
1671 device->bay = id_phys->phys_bay_in_box; in pqi_get_physical_device_info()
1672 device->lun_count = id_phys->multi_lun_device_lun_count; in pqi_get_physical_device_info()
1673 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && in pqi_get_physical_device_info()
1674 id_phys->phy_count) in pqi_get_physical_device_info()
1675 device->phy_id = in pqi_get_physical_device_info()
1676 id_phys->phy_to_phy_map[device->active_path_index]; in pqi_get_physical_device_info()
1678 device->phy_id = 0xFF; in pqi_get_physical_device_info()
1680 device->ncq_prio_support = in pqi_get_physical_device_info()
1681 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & in pqi_get_physical_device_info()
1684 …device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVI… in pqi_get_physical_device_info()
1697 return -ENOMEM; in pqi_get_logical_device_info()
1700 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); in pqi_get_logical_device_info()
1707 device->devtype = buffer[0] & 0x1f; in pqi_get_logical_device_info()
1708 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); in pqi_get_logical_device_info()
1709 memcpy(device->model, &buffer[16], sizeof(device->model)); in pqi_get_logical_device_info()
1711 if (device->devtype == TYPE_DISK) { in pqi_get_logical_device_info()
1712 if (device->is_external_raid_device) { in pqi_get_logical_device_info()
1713 device->raid_level = SA_RAID_UNKNOWN; in pqi_get_logical_device_info()
1714 device->volume_status = CISS_LV_OK; in pqi_get_logical_device_info()
1715 device->volume_offline = false; in pqi_get_logical_device_info()
1736 * Note: devices that have completed sanitize must be re-enabled
1741 return device->erase_in_progress; in pqi_keep_device_offline()
1750 if (device->is_expander_smp_device) in pqi_get_device_info_phys_logical()
1769 if (rc == 0 && device->lun_count == 0) in pqi_get_device_info()
1770 device->lun_count = 1; in pqi_get_device_info()
1783 switch (device->volume_status) { in pqi_show_volume_status()
1842 status = "Encrypted volume inaccessible - key not present"; in pqi_show_volume_status()
1848 status = "Volume undergoing encryption re-keying process"; in pqi_show_volume_status()
1867 unknown_state_str, device->volume_status); in pqi_show_volume_status()
1872 dev_info(&ctrl_info->pci_dev->dev, in pqi_show_volume_status()
1874 ctrl_info->scsi_host->host_no, in pqi_show_volume_status()
1875 device->bus, device->target, device->lun, status); in pqi_show_volume_status()
1894 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, in pqi_add_device()
1895 device->target, device->lun); in pqi_add_device()
1897 rc = pqi_add_sas_device(ctrl_info->sas_host, device); in pqi_add_device()
1909 for (lun = 0; lun < device->lun_count; lun++) { in pqi_remove_device()
1913 dev_err(&ctrl_info->pci_dev->dev, in pqi_remove_device()
1915 ctrl_info->scsi_host->host_no, device->bus, in pqi_remove_device()
1916 device->target, lun, in pqi_remove_device()
1917 atomic_read(&device->scsi_cmds_outstanding[lun])); in pqi_remove_device()
1921 scsi_remove_device(device->sdev); in pqi_remove_device()
1935 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_find_scsi_dev()
1936 if (device->bus == bus && device->target == target && device->lun == lun) in pqi_find_scsi_dev()
1944 if (dev1->is_physical_device != dev2->is_physical_device) in pqi_device_equal()
1947 if (dev1->is_physical_device) in pqi_device_equal()
1948 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; in pqi_device_equal()
1950 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; in pqi_device_equal()
1964 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_scsi_find_entry()
1965 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { in pqi_scsi_find_entry()
1968 if (device_to_find->volume_offline) in pqi_scsi_find_entry()
1981 if (device->is_expander_smp_device) in pqi_device_type()
1984 return scsi_device_type(device->devtype); in pqi_device_type()
1996 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); in pqi_dev_info()
1998 if (device->target_lun_valid) in pqi_dev_info()
2000 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2002 device->target, in pqi_dev_info()
2003 device->lun); in pqi_dev_info()
2006 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2007 "-:-"); in pqi_dev_info()
2011 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2013 *((u32 *)&device->scsi3addr), in pqi_dev_info()
2014 *((u32 *)&device->scsi3addr[4])); in pqi_dev_info()
2017 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2019 get_unaligned_be64(&device->wwid[0]), in pqi_dev_info()
2020 get_unaligned_be64(&device->wwid[8])); in pqi_dev_info()
2022 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2025 device->vendor, in pqi_dev_info()
2026 device->model); in pqi_dev_info()
2029 if (device->devtype == TYPE_DISK) in pqi_dev_info()
2031 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2032 "SSDSmartPathCap%c En%c %-12s", in pqi_dev_info()
2033 device->raid_bypass_configured ? '+' : '-', in pqi_dev_info()
2034 device->raid_bypass_enabled ? '+' : '-', in pqi_dev_info()
2035 pqi_raid_level_to_string(device->raid_level)); in pqi_dev_info()
2038 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2039 "AIO%c", device->aio_enabled ? '+' : '-'); in pqi_dev_info()
2040 if (device->devtype == TYPE_DISK || in pqi_dev_info()
2041 device->devtype == TYPE_ZBC) in pqi_dev_info()
2043 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2044 " qd=%-6d", device->queue_depth); in pqi_dev_info()
2047 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); in pqi_dev_info()
2058 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); in pqi_raid_maps_equal()
2059 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); in pqi_raid_maps_equal()
2072 existing_device->device_type = new_device->device_type; in pqi_scsi_update_device()
2073 existing_device->bus = new_device->bus; in pqi_scsi_update_device()
2074 if (new_device->target_lun_valid) { in pqi_scsi_update_device()
2075 existing_device->target = new_device->target; in pqi_scsi_update_device()
2076 existing_device->lun = new_device->lun; in pqi_scsi_update_device()
2077 existing_device->target_lun_valid = true; in pqi_scsi_update_device()
2082 existing_device->is_physical_device = new_device->is_physical_device; in pqi_scsi_update_device()
2083 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); in pqi_scsi_update_device()
2084 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); in pqi_scsi_update_device()
2085 existing_device->sas_address = new_device->sas_address; in pqi_scsi_update_device()
2086 existing_device->queue_depth = new_device->queue_depth; in pqi_scsi_update_device()
2087 existing_device->device_offline = false; in pqi_scsi_update_device()
2088 existing_device->lun_count = new_device->lun_count; in pqi_scsi_update_device()
2091 existing_device->is_external_raid_device = new_device->is_external_raid_device; in pqi_scsi_update_device()
2093 if (existing_device->devtype == TYPE_DISK) { in pqi_scsi_update_device()
2094 existing_device->raid_level = new_device->raid_level; in pqi_scsi_update_device()
2095 existing_device->volume_status = new_device->volume_status; in pqi_scsi_update_device()
2096 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); in pqi_scsi_update_device()
2097 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { in pqi_scsi_update_device()
2098 kfree(existing_device->raid_map); in pqi_scsi_update_device()
2099 existing_device->raid_map = new_device->raid_map; in pqi_scsi_update_device()
2101 new_device->raid_map = NULL; in pqi_scsi_update_device()
2103 existing_device->raid_bypass_configured = new_device->raid_bypass_configured; in pqi_scsi_update_device()
2104 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; in pqi_scsi_update_device()
2107 existing_device->aio_enabled = new_device->aio_enabled; in pqi_scsi_update_device()
2108 existing_device->aio_handle = new_device->aio_handle; in pqi_scsi_update_device()
2109 existing_device->is_expander_smp_device = new_device->is_expander_smp_device; in pqi_scsi_update_device()
2110 existing_device->active_path_index = new_device->active_path_index; in pqi_scsi_update_device()
2111 existing_device->phy_id = new_device->phy_id; in pqi_scsi_update_device()
2112 existing_device->path_map = new_device->path_map; in pqi_scsi_update_device()
2113 existing_device->bay = new_device->bay; in pqi_scsi_update_device()
2114 existing_device->box_index = new_device->box_index; in pqi_scsi_update_device()
2115 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; in pqi_scsi_update_device()
2116 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; in pqi_scsi_update_device()
2117 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); in pqi_scsi_update_device()
2118 …memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_c… in pqi_scsi_update_device()
2125 kfree(device->raid_map); in pqi_free_device()
2131 * Called when exposing a new device to the OS fails in order to re-adjust
2140 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
2141 list_del(&device->scsi_device_list_entry); in pqi_fixup_botched_add()
2142 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
2145 device->keep_device = false; in pqi_fixup_botched_add()
2150 if (device->is_expander_smp_device) in pqi_is_device_added()
2151 return device->sas_port != NULL; in pqi_is_device_added()
2153 return device->sdev != NULL; in pqi_is_device_added()
2161 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) in pqi_init_device_tmf_work()
2162 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); in pqi_init_device_tmf_work()
2170 if (device->sdev == NULL) in pqi_volume_rescan_needed()
2173 if (!scsi_device_online(device->sdev)) in pqi_volume_rescan_needed()
2176 return device->rescan; in pqi_volume_rescan_needed()
2199 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2202 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_update_device_list()
2203 device->device_gone = true; in pqi_update_device_list()
2217 device->new_device = false; in pqi_update_device_list()
2218 matching_device->device_gone = false; in pqi_update_device_list()
2226 device->new_device = true; in pqi_update_device_list()
2233 device->new_device = true; in pqi_update_device_list()
2239 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, in pqi_update_device_list()
2241 if (device->device_gone) { in pqi_update_device_list()
2242 list_del(&device->scsi_device_list_entry); in pqi_update_device_list()
2243 list_add_tail(&device->delete_list_entry, &delete_list); in pqi_update_device_list()
2250 if (!device->new_device) in pqi_update_device_list()
2252 if (device->volume_offline) in pqi_update_device_list()
2254 list_add_tail(&device->scsi_device_list_entry, in pqi_update_device_list()
2255 &ctrl_info->scsi_device_list); in pqi_update_device_list()
2256 list_add_tail(&device->add_list_entry, &add_list); in pqi_update_device_list()
2258 device->keep_device = true; in pqi_update_device_list()
2262 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2279 if (device->volume_offline) { in pqi_update_device_list()
2287 list_del(&device->delete_list_entry); in pqi_update_device_list()
2292	 * Notify the SML of any existing device changes such as: in pqi_update_device_list()
2293 * queue depth, device size. in pqi_update_device_list()
2295 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_update_device_list()
2296 if (device->sdev && device->queue_depth != device->advertised_queue_depth) { in pqi_update_device_list()
2297 device->advertised_queue_depth = device->queue_depth; in pqi_update_device_list()
2298 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); in pqi_update_device_list()
2299 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2301 device->rescan = false; in pqi_update_device_list()
2302 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2303 scsi_rescan_device(device->sdev); in pqi_update_device_list()
2305 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2317 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_device_list()
2319 ctrl_info->scsi_host->host_no, in pqi_update_device_list()
2320 device->bus, device->target, in pqi_update_device_list()
2321 device->lun); in pqi_update_device_list()
2337 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && in pqi_is_supported_device()
2338 !pqi_is_hba_lunid(device->scsi3addr)) in pqi_is_supported_device()
2363 return (device->path_map & (device->path_map - 1)) != 0; in pqi_is_multipath_device()
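
pqi_is_multipath_device() relies on the classic x & (x - 1) idiom: the expression clears the lowest set bit, so it is nonzero exactly when at least two bits of path_map are set, i.e. when more than one path is present. A standalone illustration (hypothetical bitmap values):

#include <assert.h>
#include <stdint.h>

static int more_than_one_bit(uint32_t x)
{
	return (x & (x - 1)) != 0;	/* clears the lowest set bit */
}

int main(void)
{
	assert(!more_than_one_bit(0x0));	/* no path */
	assert(!more_than_one_bit(0x4));	/* one path: not multipath */
	assert(more_than_one_bit(0x5));		/* two paths: multipath */
	return 0;
}
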
2368 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); in pqi_expose_device()
2401 get_unaligned_be32(&physdev_list->header.list_length) in pqi_update_scsi_devices()
2402 / sizeof(physdev_list->lun_entries[0]); in pqi_update_scsi_devices()
2408 get_unaligned_be32(&logdev_list->header.list_length) in pqi_update_scsi_devices()
2409 / sizeof(logdev_list->lun_entries[0]); in pqi_update_scsi_devices()
2422 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2424 rc = -ENOMEM; in pqi_update_scsi_devices()
2429 for (i = num_physicals - 1; i >= 0; i--) { in pqi_update_scsi_devices()
2430 phys_lun = &physdev_list->lun_entries[i]; in pqi_update_scsi_devices()
2431 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { in pqi_update_scsi_devices()
2432 pqi_mask_device(phys_lun->lunid); in pqi_update_scsi_devices()
2440 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) in pqi_update_scsi_devices()
2441 ctrl_info->lv_drive_type_mix_valid = true; in pqi_update_scsi_devices()
2449 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); in pqi_update_scsi_devices()
2450 rc = -ENOMEM; in pqi_update_scsi_devices()
2457 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2459 rc = -ENOMEM; in pqi_update_scsi_devices()
2462 list_add_tail(&device->new_device_list_entry, in pqi_update_scsi_devices()
2476 phys_lun = &physdev_list->lun_entries[physical_index++]; in pqi_update_scsi_devices()
2478 scsi3addr = phys_lun->lunid; in pqi_update_scsi_devices()
2482 log_lun = &logdev_list->lun_entries[logical_index++]; in pqi_update_scsi_devices()
2483 scsi3addr = log_lun->lunid; in pqi_update_scsi_devices()
2495 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); in pqi_update_scsi_devices()
2496 device->is_physical_device = is_physical_device; in pqi_update_scsi_devices()
2498 device->device_type = phys_lun->device_type; in pqi_update_scsi_devices()
2499 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) in pqi_update_scsi_devices()
2500 device->is_expander_smp_device = true; in pqi_update_scsi_devices()
2502 device->is_external_raid_device = in pqi_update_scsi_devices()
2511 if (rc == -ENOMEM) { in pqi_update_scsi_devices()
2512 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2517 if (device->is_physical_device) in pqi_update_scsi_devices()
2518 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2520 get_unaligned_be64(&phys_lun->wwid[0]), in pqi_update_scsi_devices()
2521 get_unaligned_be64(&phys_lun->wwid[8])); in pqi_update_scsi_devices()
2523 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2525 *((u32 *)&device->scsi3addr), in pqi_update_scsi_devices()
2526 *((u32 *)&device->scsi3addr[4])); in pqi_update_scsi_devices()
2537 if (device->is_physical_device) { in pqi_update_scsi_devices()
2538 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); in pqi_update_scsi_devices()
2539 if ((phys_lun->device_flags & in pqi_update_scsi_devices()
2541 phys_lun->aio_handle) { in pqi_update_scsi_devices()
2542 device->aio_enabled = true; in pqi_update_scsi_devices()
2543 device->aio_handle = in pqi_update_scsi_devices()
2544 phys_lun->aio_handle; in pqi_update_scsi_devices()
2547 memcpy(device->volume_id, log_lun->volume_id, in pqi_update_scsi_devices()
2548 sizeof(device->volume_id)); in pqi_update_scsi_devices()
2551 device->sas_address = get_unaligned_be64(&device->wwid[0]); in pqi_update_scsi_devices()
2561 if (device->keep_device) in pqi_update_scsi_devices()
2563 list_del(&device->new_device_list_entry); in pqi_update_scsi_devices()
2581 return -ENXIO; in pqi_scan_scsi_devices()
2583 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2587 return -EBUSY; in pqi_scan_scsi_devices()
2589 return -EINPROGRESS; in pqi_scan_scsi_devices()
2596 mutex_unlock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2619 return !mutex_is_locked(&ctrl_info->scan_mutex); in pqi_scan_finished()
2629 * If the block size is 512, the tweak value is equal to the LBA. in pqi_set_encryption_info()
2630	 * For other block sizes, the tweak value is (LBA * block size) / 512. in pqi_set_encryption_info()
2632 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); in pqi_set_encryption_info()
2636 encryption_info->data_encryption_key_index = in pqi_set_encryption_info()
2637 get_unaligned_le16(&raid_map->data_encryption_key_index); in pqi_set_encryption_info()
2638 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); in pqi_set_encryption_info()
2639 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); in pqi_set_encryption_info()
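
The scaling step the comment describes falls between the matched lines; a sketch of the arithmetic, assuming first_block arrives in units of the volume block size (hypothetical helper, not the driver's exact code, and ignoring 64-bit overflow for simplicity):

#include <stdint.h>

/* Tweak equals the LBA at 512-byte blocks; otherwise rescale to 512-byte units. */
static uint64_t encrypt_tweak(uint64_t first_block, uint32_t volume_blk_size)
{
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) >> 9;	/* x / 512 */
	return first_block;	/* split into lower/upper 32 bits as above */
}
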
2651 switch (rmd->raid_level) { in pqi_aio_raid_level_supported()
2655 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2656 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) in pqi_aio_raid_level_supported()
2660 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2661 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) in pqi_aio_raid_level_supported()
2665 if (rmd->is_write && (!ctrl_info->enable_r5_writes || in pqi_aio_raid_level_supported()
2666 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2670 if (rmd->is_write && (!ctrl_info->enable_r6_writes || in pqi_aio_raid_level_supported()
2671 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2688 switch (scmd->cmnd[0]) { in pqi_get_aio_lba_and_block_count()
2690 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2693 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | in pqi_get_aio_lba_and_block_count()
2694 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); in pqi_get_aio_lba_and_block_count()
2695 rmd->block_cnt = (u32)scmd->cmnd[4]; in pqi_get_aio_lba_and_block_count()
2696 if (rmd->block_cnt == 0) in pqi_get_aio_lba_and_block_count()
2697 rmd->block_cnt = 256; in pqi_get_aio_lba_and_block_count()
2700 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2703 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2704 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); in pqi_get_aio_lba_and_block_count()
2707 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2710 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2711 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); in pqi_get_aio_lba_and_block_count()
2714 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2717 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2718 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); in pqi_get_aio_lba_and_block_count()
2725 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); in pqi_get_aio_lba_and_block_count()
2737 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; in pci_get_aio_common_raid_map_values()
2740 if (rmd->last_block >= in pci_get_aio_common_raid_map_values()
2741 get_unaligned_le64(&raid_map->volume_blk_cnt) || in pci_get_aio_common_raid_map_values()
2742 rmd->last_block < rmd->first_block) in pci_get_aio_common_raid_map_values()
2745 rmd->data_disks_per_row = in pci_get_aio_common_raid_map_values()
2746 get_unaligned_le16(&raid_map->data_disks_per_row); in pci_get_aio_common_raid_map_values()
2747 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); in pci_get_aio_common_raid_map_values()
2748 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); in pci_get_aio_common_raid_map_values()
2751 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; in pci_get_aio_common_raid_map_values()
2752 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ in pci_get_aio_common_raid_map_values()
2755 tmpdiv = rmd->first_block; in pci_get_aio_common_raid_map_values()
2756 do_div(tmpdiv, rmd->blocks_per_row); in pci_get_aio_common_raid_map_values()
2757 rmd->first_row = tmpdiv; in pci_get_aio_common_raid_map_values()
2758 tmpdiv = rmd->last_block; in pci_get_aio_common_raid_map_values()
2759 do_div(tmpdiv, rmd->blocks_per_row); in pci_get_aio_common_raid_map_values()
2760 rmd->last_row = tmpdiv; in pci_get_aio_common_raid_map_values()
2761 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2762 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2763 tmpdiv = rmd->first_row_offset; in pci_get_aio_common_raid_map_values()
2764 do_div(tmpdiv, rmd->strip_size); in pci_get_aio_common_raid_map_values()
2765 rmd->first_column = tmpdiv; in pci_get_aio_common_raid_map_values()
2766 tmpdiv = rmd->last_row_offset; in pci_get_aio_common_raid_map_values()
2767 do_div(tmpdiv, rmd->strip_size); in pci_get_aio_common_raid_map_values()
2768 rmd->last_column = tmpdiv; in pci_get_aio_common_raid_map_values()
2770 rmd->first_row = rmd->first_block / rmd->blocks_per_row; in pci_get_aio_common_raid_map_values()
2771 rmd->last_row = rmd->last_block / rmd->blocks_per_row; in pci_get_aio_common_raid_map_values()
2772 rmd->first_row_offset = (u32)(rmd->first_block - in pci_get_aio_common_raid_map_values()
2773 (rmd->first_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2774 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * in pci_get_aio_common_raid_map_values()
2775 rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2776 rmd->first_column = rmd->first_row_offset / rmd->strip_size; in pci_get_aio_common_raid_map_values()
2777 rmd->last_column = rmd->last_row_offset / rmd->strip_size; in pci_get_aio_common_raid_map_values()
2781 if (rmd->first_row != rmd->last_row || in pci_get_aio_common_raid_map_values()
2782 rmd->first_column != rmd->last_column) in pci_get_aio_common_raid_map_values()
2786 rmd->total_disks_per_row = rmd->data_disks_per_row + in pci_get_aio_common_raid_map_values()
2787 get_unaligned_le16(&raid_map->metadata_disks_per_row); in pci_get_aio_common_raid_map_values()
2788 rmd->map_row = ((u32)(rmd->first_row >> in pci_get_aio_common_raid_map_values()
2789 raid_map->parity_rotation_shift)) % in pci_get_aio_common_raid_map_values()
2790 get_unaligned_le16(&raid_map->row_cnt); in pci_get_aio_common_raid_map_values()
2791 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + in pci_get_aio_common_raid_map_values()
2792 rmd->first_column; in pci_get_aio_common_raid_map_values()
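
The tmpdiv copies above exist because the kernel's do_div(n, base) divides the 64-bit n in place and evaluates to the 32-bit remainder; the matched lines interleave that path with the plain-division fallback used where native 64-bit division is cheap. A user-space model of the contract (illustrative, not the kernel macro):

#include <stdint.h>

/* Models do_div(): divides *n in place, returns the remainder. */
static uint32_t model_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

/* Usage mirroring the code above: row index and offset within the row. */
static void locate_block(uint64_t first_block, uint32_t blocks_per_row,
			 uint64_t *first_row, uint32_t *first_row_offset)
{
	uint64_t tmpdiv = first_block;

	*first_row_offset = model_do_div(&tmpdiv, blocks_per_row);
	*first_row = tmpdiv;
}
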
2804 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ in pqi_calc_aio_r5_or_r6()
2809 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; in pqi_calc_aio_r5_or_r6()
2811 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2812 rmd->first_group = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2813 tmpdiv = rmd->first_group; in pqi_calc_aio_r5_or_r6()
2814 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2815 rmd->first_group = tmpdiv; in pqi_calc_aio_r5_or_r6()
2816 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2817 rmd->last_group = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2818 tmpdiv = rmd->last_group; in pqi_calc_aio_r5_or_r6()
2819 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2820 rmd->last_group = tmpdiv; in pqi_calc_aio_r5_or_r6()
2822 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
2823 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
2825 if (rmd->first_group != rmd->last_group) in pqi_calc_aio_r5_or_r6()
2830 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2831 do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2832 rmd->first_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2833 rmd->r5or6_first_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2834 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2835 do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2836 rmd->r5or6_last_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2838 rmd->first_row = rmd->r5or6_first_row = in pqi_calc_aio_r5_or_r6()
2839 rmd->first_block / rmd->stripesize; in pqi_calc_aio_r5_or_r6()
2840 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; in pqi_calc_aio_r5_or_r6()
2842 if (rmd->r5or6_first_row != rmd->r5or6_last_row) in pqi_calc_aio_r5_or_r6()
2847 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2848 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2849 tmpdiv = rmd->first_row_offset; in pqi_calc_aio_r5_or_r6()
2850 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2851 rmd->r5or6_first_row_offset = rmd->first_row_offset; in pqi_calc_aio_r5_or_r6()
2852 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2853 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2854 tmpdiv = rmd->r5or6_last_row_offset; in pqi_calc_aio_r5_or_r6()
2855 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2856 tmpdiv = rmd->r5or6_first_row_offset; in pqi_calc_aio_r5_or_r6()
2857 do_div(tmpdiv, rmd->strip_size); in pqi_calc_aio_r5_or_r6()
2858 rmd->first_column = rmd->r5or6_first_column = tmpdiv; in pqi_calc_aio_r5_or_r6()
2859 tmpdiv = rmd->r5or6_last_row_offset; in pqi_calc_aio_r5_or_r6()
2860 do_div(tmpdiv, rmd->strip_size); in pqi_calc_aio_r5_or_r6()
2861 rmd->r5or6_last_column = tmpdiv; in pqi_calc_aio_r5_or_r6()
2863 rmd->first_row_offset = rmd->r5or6_first_row_offset = in pqi_calc_aio_r5_or_r6()
2864 (u32)((rmd->first_block % rmd->stripesize) % in pqi_calc_aio_r5_or_r6()
2865 rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2867 rmd->r5or6_last_row_offset = in pqi_calc_aio_r5_or_r6()
2868 (u32)((rmd->last_block % rmd->stripesize) % in pqi_calc_aio_r5_or_r6()
2869 rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2871 rmd->first_column = in pqi_calc_aio_r5_or_r6()
2872 rmd->r5or6_first_row_offset / rmd->strip_size; in pqi_calc_aio_r5_or_r6()
2873 rmd->r5or6_first_column = rmd->first_column; in pqi_calc_aio_r5_or_r6()
2874 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; in pqi_calc_aio_r5_or_r6()
2876 if (rmd->r5or6_first_column != rmd->r5or6_last_column) in pqi_calc_aio_r5_or_r6()
2880 rmd->map_row = in pqi_calc_aio_r5_or_r6()
2881 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % in pqi_calc_aio_r5_or_r6()
2882 get_unaligned_le16(&raid_map->row_cnt); in pqi_calc_aio_r5_or_r6()
2884 rmd->map_index = (rmd->first_group * in pqi_calc_aio_r5_or_r6()
2885 (get_unaligned_le16(&raid_map->row_cnt) * in pqi_calc_aio_r5_or_r6()
2886 rmd->total_disks_per_row)) + in pqi_calc_aio_r5_or_r6()
2887 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; in pqi_calc_aio_r5_or_r6()
2889 if (rmd->is_write) { in pqi_calc_aio_r5_or_r6()
2898	 * The device's RAID map size is checked during device in pqi_calc_aio_r5_or_r6()
2901 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); in pqi_calc_aio_r5_or_r6()
2902 index *= rmd->total_disks_per_row; in pqi_calc_aio_r5_or_r6()
2903 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); in pqi_calc_aio_r5_or_r6()
2905 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r5_or_r6()
2906 if (rmd->raid_level == SA_RAID_6) { in pqi_calc_aio_r5_or_r6()
2907 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; in pqi_calc_aio_r5_or_r6()
2908 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; in pqi_calc_aio_r5_or_r6()
2911 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2912 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2913 rmd->row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2915 rmd->row = rmd->first_block / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
2925 if (rmd->disk_block > 0xffffffff) { in pqi_set_aio_cdb()
2926 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16; in pqi_set_aio_cdb()
2927 rmd->cdb[1] = 0; in pqi_set_aio_cdb()
2928 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); in pqi_set_aio_cdb()
2929 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); in pqi_set_aio_cdb()
2930 rmd->cdb[14] = 0; in pqi_set_aio_cdb()
2931 rmd->cdb[15] = 0; in pqi_set_aio_cdb()
2932 rmd->cdb_length = 16; in pqi_set_aio_cdb()
2934 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; in pqi_set_aio_cdb()
2935 rmd->cdb[1] = 0; in pqi_set_aio_cdb()
2936 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); in pqi_set_aio_cdb()
2937 rmd->cdb[6] = 0; in pqi_set_aio_cdb()
2938 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); in pqi_set_aio_cdb()
2939 rmd->cdb[9] = 0; in pqi_set_aio_cdb()
2940 rmd->cdb_length = 10; in pqi_set_aio_cdb()
2950 group = rmd->map_index / rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2952 index = rmd->map_index - (group * rmd->data_disks_per_row); in pqi_calc_aio_r1_nexus()
2953 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2954 index += rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2955 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2956 if (rmd->layout_map_count > 2) { in pqi_calc_aio_r1_nexus()
2957 index += rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2958 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2961 rmd->num_it_nexus_entries = rmd->layout_map_count; in pqi_calc_aio_r1_nexus()
2980 rmd.raid_level = device->raid_level; in pqi_raid_bypass_submit_scsi_cmd()
2988 raid_map = device->raid_map; in pqi_raid_bypass_submit_scsi_cmd()
2994 if (device->raid_level == SA_RAID_1 || in pqi_raid_bypass_submit_scsi_cmd()
2995 device->raid_level == SA_RAID_TRIPLE) { in pqi_raid_bypass_submit_scsi_cmd()
2999 group = device->next_bypass_group[rmd.map_index]; in pqi_raid_bypass_submit_scsi_cmd()
3003 device->next_bypass_group[rmd.map_index] = next_bypass_group; in pqi_raid_bypass_submit_scsi_cmd()
3006 } else if ((device->raid_level == SA_RAID_5 || in pqi_raid_bypass_submit_scsi_cmd()
3007 device->raid_level == SA_RAID_6) && in pqi_raid_bypass_submit_scsi_cmd()
3017 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; in pqi_raid_bypass_submit_scsi_cmd()
3018 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + in pqi_raid_bypass_submit_scsi_cmd()
3020 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); in pqi_raid_bypass_submit_scsi_cmd()
3024 if (raid_map->phys_blk_shift) { in pqi_raid_bypass_submit_scsi_cmd()
3025 rmd.disk_block <<= raid_map->phys_blk_shift; in pqi_raid_bypass_submit_scsi_cmd()
3026 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; in pqi_raid_bypass_submit_scsi_cmd()
3034 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { in pqi_raid_bypass_submit_scsi_cmd()
3035 if (rmd.data_length > device->max_transfer_encrypted) in pqi_raid_bypass_submit_scsi_cmd()
3044 switch (device->raid_level) { in pqi_raid_bypass_submit_scsi_cmd()
3082 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_mode_ready()
3086 signature = readq(&pqi_registers->signature); in pqi_wait_for_pqi_mode_ready()
3091 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3093 return -ETIMEDOUT; in pqi_wait_for_pqi_mode_ready()
3099 status = readb(&pqi_registers->function_and_status_code); in pqi_wait_for_pqi_mode_ready()
3103 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3105 return -ETIMEDOUT; in pqi_wait_for_pqi_mode_ready()
3111 if (readl(&pqi_registers->device_status) == in pqi_wait_for_pqi_mode_ready()
3115 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3117 return -ETIMEDOUT; in pqi_wait_for_pqi_mode_ready()
3129 device = io_request->scmd->device->hostdata; in pqi_aio_path_disabled()
3130 device->raid_bypass_enabled = false; in pqi_aio_path_disabled()
3131 device->aio_enabled = false; in pqi_aio_path_disabled()
3139 device = sdev->hostdata; in pqi_take_device_offline()
3140 if (device->device_offline) in pqi_take_device_offline()
3143 device->device_offline = true; in pqi_take_device_offline()
3144 ctrl_info = shost_to_hba(sdev->host); in pqi_take_device_offline()
3146 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", in pqi_take_device_offline()
3147 path, ctrl_info->scsi_host->host_no, device->bus, in pqi_take_device_offline()
3148 device->target, device->lun); in pqi_take_device_offline()
3162 scmd = io_request->scmd; in pqi_process_raid_io_error()
3166 error_info = io_request->error_info; in pqi_process_raid_io_error()
3167 scsi_status = error_info->status; in pqi_process_raid_io_error()
3170 switch (error_info->data_out_result) { in pqi_process_raid_io_error()
3175 get_unaligned_le32(&error_info->data_out_transferred); in pqi_process_raid_io_error()
3176 residual_count = scsi_bufflen(scmd) - xfer_count; in pqi_process_raid_io_error()
3178 if (xfer_count < scmd->underflow) in pqi_process_raid_io_error()
3209 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); in pqi_process_raid_io_error()
3212 get_unaligned_le16(&error_info->response_data_length); in pqi_process_raid_io_error()
3214 if (sense_data_length > sizeof(error_info->data)) in pqi_process_raid_io_error()
3215 sense_data_length = sizeof(error_info->data); in pqi_process_raid_io_error()
3218 scsi_normalize_sense(error_info->data, in pqi_process_raid_io_error()
3222 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); in pqi_process_raid_io_error()
3223 struct pqi_scsi_dev *device = scmd->device->hostdata; in pqi_process_raid_io_error()
3229 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3230 pqi_take_device_offline(scmd->device, "RAID"); in pqi_process_raid_io_error()
3234 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ in pqi_process_raid_io_error()
3237 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3244 memcpy(scmd->sense_buffer, error_info->data, in pqi_process_raid_io_error()
3248 scmd->result = scsi_status; in pqi_process_raid_io_error()
3264 scmd = io_request->scmd; in pqi_process_aio_io_error()
3265 error_info = io_request->error_info; in pqi_process_aio_io_error()
3269 device = scmd->device->hostdata; in pqi_process_aio_io_error()
3271 switch (error_info->service_response) { in pqi_process_aio_io_error()
3273 scsi_status = error_info->status; in pqi_process_aio_io_error()
3276 switch (error_info->status) { in pqi_process_aio_io_error()
3283 &error_info->residual_count); in pqi_process_aio_io_error()
3285 xfer_count = scsi_bufflen(scmd) - residual_count; in pqi_process_aio_io_error()
3286 if (xfer_count < scmd->underflow) in pqi_process_aio_io_error()
3300 io_request->status = -EAGAIN; in pqi_process_aio_io_error()
3305 if (!io_request->raid_bypass) { in pqi_process_aio_io_error()
3307 pqi_take_device_offline(scmd->device, "AIO"); in pqi_process_aio_io_error()
3329 if (error_info->data_present) { in pqi_process_aio_io_error()
3331 get_unaligned_le16(&error_info->data_length); in pqi_process_aio_io_error()
3333 if (sense_data_length > sizeof(error_info->data)) in pqi_process_aio_io_error()
3334 sense_data_length = sizeof(error_info->data); in pqi_process_aio_io_error()
3337 memcpy(scmd->sense_buffer, error_info->data, in pqi_process_aio_io_error()
3345 scmd->result = scsi_status; in pqi_process_aio_io_error()
3367 switch (response->response_code) { in pqi_interpret_task_management_response()
3373 rc = -EAGAIN; in pqi_interpret_task_management_response()
3376 rc = -ENODEV; in pqi_interpret_task_management_response()
3379 rc = -EIO; in pqi_interpret_task_management_response()
3384 dev_err(&ctrl_info->pci_dev->dev, in pqi_interpret_task_management_response()
3385 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); in pqi_interpret_task_management_response()
3406 oq_ci = queue_group->oq_ci_copy; in pqi_process_io_intr()
3409 oq_pi = readl(queue_group->oq_pi); in pqi_process_io_intr()
3410 if (oq_pi >= ctrl_info->num_elements_per_oq) { in pqi_process_io_intr()
3412 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3413 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", in pqi_process_io_intr()
3414 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); in pqi_process_io_intr()
3415 return -1; in pqi_process_io_intr()
3421 response = queue_group->oq_element_array + in pqi_process_io_intr()
3424 request_id = get_unaligned_le16(&response->request_id); in pqi_process_io_intr()
3425 if (request_id >= ctrl_info->max_io_slots) { in pqi_process_io_intr()
3427 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3428 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", in pqi_process_io_intr()
3429 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); in pqi_process_io_intr()
3430 return -1; in pqi_process_io_intr()
3433 io_request = &ctrl_info->io_request_pool[request_id]; in pqi_process_io_intr()
3434 if (atomic_read(&io_request->refcount) == 0) { in pqi_process_io_intr()
3436 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3439 return -1; in pqi_process_io_intr()
3442 switch (response->header.iu_type) { in pqi_process_io_intr()
3445 if (io_request->scmd) in pqi_process_io_intr()
3446 io_request->scmd->result = 0; in pqi_process_io_intr()
3451 io_request->status = in pqi_process_io_intr()
3453 &((struct pqi_vendor_general_response *)response)->status); in pqi_process_io_intr()
3456 io_request->status = pqi_interpret_task_management_response(ctrl_info, in pqi_process_io_intr()
3461 io_request->status = -EAGAIN; in pqi_process_io_intr()
3465 io_request->error_info = ctrl_info->error_buffer + in pqi_process_io_intr()
3466 (get_unaligned_le16(&response->error_index) * in pqi_process_io_intr()
3468 pqi_process_io_error(response->header.iu_type, io_request); in pqi_process_io_intr()
3472 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3474 response->header.iu_type, oq_pi, oq_ci); in pqi_process_io_intr()
3475 return -1; in pqi_process_io_intr()
3478 io_request->io_complete_callback(io_request, io_request->context); in pqi_process_io_intr()
3484 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; in pqi_process_io_intr()
3488 queue_group->oq_ci_copy = oq_ci; in pqi_process_io_intr()
3489 writel(oq_ci, queue_group->oq_ci); in pqi_process_io_intr()
3501 num_elements_used = pi - ci; in pqi_num_elements_free()
3503 num_elements_used = elements_in_queue - ci + pi; in pqi_num_elements_free()
3505 return elements_in_queue - num_elements_used - 1; in pqi_num_elements_free()
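/*
 * The free-element computation above is standard ring-buffer math: with
 * producer index pi and consumer index ci, one slot is kept empty so
 * that pi == ci unambiguously means "queue empty". A worked example,
 * assuming a 16-element queue:
 *
 *   pi = 5,  ci = 2  -> used = 5 - 2 = 3,       free = 16 - 3 - 1 = 12
 *   pi = 1,  ci = 14 -> used = 16 - 14 + 1 = 3, free = 16 - 3 - 1 = 12
 */
static unsigned int ring_free_sketch(unsigned int pi, unsigned int ci,
	unsigned int elements_in_queue)
{
	unsigned int used;

	if (pi >= ci)
		used = pi - ci;
	else
		used = elements_in_queue - ci + pi;	/* producer wrapped */

	return elements_in_queue - used - 1;		/* reserve one slot */
}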
3517 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; in pqi_send_event_ack()
3518 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); in pqi_send_event_ack()
3521 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3523 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; in pqi_send_event_ack()
3524 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); in pqi_send_event_ack()
3527 ctrl_info->num_elements_per_iq)) in pqi_send_event_ack()
3531 &queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3537 next_element = queue_group->iq_element_array[RAID_PATH] + in pqi_send_event_ack()
3542 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; in pqi_send_event_ack()
3543 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; in pqi_send_event_ack()
3549 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); in pqi_send_event_ack()
3551 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3562 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, in pqi_acknowledge_event()
3564 request.event_type = event->event_type; in pqi_acknowledge_event()
3565 put_unaligned_le16(event->event_id, &request.event_id); in pqi_acknowledge_event()
3566 put_unaligned_le32(event->additional_event_id, &request.additional_event_id); in pqi_acknowledge_event()
3594 dev_warn(&ctrl_info->pci_dev->dev, in pqi_poll_for_soft_reset_status()
3609 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3621 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3626 ctrl_info->pqi_mode_enabled = false; in pqi_process_soft_reset()
3631 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3636 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3638 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3647 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3676 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; in pqi_ofa_quiesce_worker()
3690 switch (event->event_id) { in pqi_ofa_process_event()
3692 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3694 schedule_work(&ctrl_info->ofa_memory_alloc_work); in pqi_ofa_process_event()
3697 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3699 schedule_work(&ctrl_info->ofa_quiesce_work); in pqi_ofa_process_event()
3703 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3705 ctrl_info->ofa_cancel_reason); in pqi_ofa_process_event()
3710 dev_err(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3712 event->event_id); in pqi_ofa_process_event()
3724 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_mark_volumes_for_rescan()
3726 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_mark_volumes_for_rescan()
3727 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) in pqi_mark_volumes_for_rescan()
3728 device->rescan = true; in pqi_mark_volumes_for_rescan()
3731 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_mark_volumes_for_rescan()
3739 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_disable_raid_bypass()
3741 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_disable_raid_bypass()
3742 if (device->raid_bypass_enabled) in pqi_disable_raid_bypass()
3743 device->raid_bypass_enabled = false; in pqi_disable_raid_bypass()
3745 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_disable_raid_bypass()
3764 event = ctrl_info->events; in pqi_event_worker()
3766 if (event->pending) { in pqi_event_worker()
3767 event->pending = false; in pqi_event_worker()
3768 if (event->event_type == PQI_EVENT_TYPE_OFA) { in pqi_event_worker()
3773 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) in pqi_event_worker()
3775 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) in pqi_event_worker()
3806 num_interrupts = atomic_read(&ctrl_info->num_interrupts); in pqi_heartbeat_timer_handler()
3809 if (num_interrupts == ctrl_info->previous_num_interrupts) { in pqi_heartbeat_timer_handler()
3810 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { in pqi_heartbeat_timer_handler()
3811 dev_err(&ctrl_info->pci_dev->dev, in pqi_heartbeat_timer_handler()
3812 "no heartbeat detected - last heartbeat count: %u\n", in pqi_heartbeat_timer_handler()
3818 ctrl_info->previous_num_interrupts = num_interrupts; in pqi_heartbeat_timer_handler()
3821 ctrl_info->previous_heartbeat_count = heartbeat_count; in pqi_heartbeat_timer_handler()
3822 mod_timer(&ctrl_info->heartbeat_timer, in pqi_heartbeat_timer_handler()
3828 if (!ctrl_info->heartbeat_counter) in pqi_start_heartbeat_timer()
3831 ctrl_info->previous_num_interrupts = in pqi_start_heartbeat_timer()
3832 atomic_read(&ctrl_info->num_interrupts); in pqi_start_heartbeat_timer()
3833 ctrl_info->previous_heartbeat_count = in pqi_start_heartbeat_timer()
3836 ctrl_info->heartbeat_timer.expires = in pqi_start_heartbeat_timer()
3838 add_timer(&ctrl_info->heartbeat_timer); in pqi_start_heartbeat_timer()
3843 del_timer_sync(&ctrl_info->heartbeat_timer); in pqi_stop_heartbeat_timer()
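/*
 * Sketch of the liveness check the timer above drives: a stalled
 * heartbeat counter is only treated as fatal when the interrupt count
 * has also stopped moving, since ongoing interrupt traffic is itself
 * evidence the controller is alive. Names are illustrative.
 */
#include <stdbool.h>

static bool controller_alive_sketch(unsigned int num_interrupts,
	unsigned int prev_num_interrupts, unsigned int heartbeat_count,
	unsigned int prev_heartbeat_count)
{
	if (num_interrupts != prev_num_interrupts)
		return true;	/* I/O is flowing; skip the heartbeat test */

	return heartbeat_count != prev_heartbeat_count;
}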
3849 switch (event->event_id) { in pqi_ofa_capture_event_payload()
3851 ctrl_info->ofa_bytes_requested = in pqi_ofa_capture_event_payload()
3852 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); in pqi_ofa_capture_event_payload()
3855 ctrl_info->ofa_cancel_reason = in pqi_ofa_capture_event_payload()
3856 get_unaligned_le16(&response->data.ofa_cancelled.reason); in pqi_ofa_capture_event_payload()
3871 event_queue = &ctrl_info->event_queue; in pqi_process_event_intr()
3873 oq_ci = event_queue->oq_ci_copy; in pqi_process_event_intr()
3876 oq_pi = readl(event_queue->oq_pi); in pqi_process_event_intr()
3879 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_event_intr()
3880 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", in pqi_process_event_intr()
3881 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); in pqi_process_event_intr()
3882 return -1; in pqi_process_event_intr()
3889 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); in pqi_process_event_intr()
3891 event_index = pqi_event_type_to_event_index(response->event_type); in pqi_process_event_intr()
3893 if (event_index >= 0 && response->request_acknowledge) { in pqi_process_event_intr()
3894 event = &ctrl_info->events[event_index]; in pqi_process_event_intr()
3895 event->pending = true; in pqi_process_event_intr()
3896 event->event_type = response->event_type; in pqi_process_event_intr()
3897 event->event_id = get_unaligned_le16(&response->event_id); in pqi_process_event_intr()
3898 event->additional_event_id = in pqi_process_event_intr()
3899 get_unaligned_le32(&response->additional_event_id); in pqi_process_event_intr()
3900 if (event->event_type == PQI_EVENT_TYPE_OFA) in pqi_process_event_intr()
3908 event_queue->oq_ci_copy = oq_ci; in pqi_process_event_intr()
3909 writel(oq_ci, event_queue->oq_ci); in pqi_process_event_intr()
3910 schedule_work(&ctrl_info->event_work); in pqi_process_event_intr()
3924 pqi_registers = ctrl_info->pqi_registers; in pqi_configure_legacy_intx()
3927 register_addr = &pqi_registers->legacy_intx_mask_clear; in pqi_configure_legacy_intx()
3929 register_addr = &pqi_registers->legacy_intx_mask_set; in pqi_configure_legacy_intx()
3939 switch (ctrl_info->irq_mode) { in pqi_change_irq_mode()
3980 ctrl_info->irq_mode = new_mode; in pqi_change_irq_mode()
3990 switch (ctrl_info->irq_mode) { in pqi_is_valid_irq()
3995 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); in pqi_is_valid_irq()
4018 ctrl_info = queue_group->ctrl_info; in pqi_irq_handler()
4027 if (irq == ctrl_info->event_irq) { in pqi_irq_handler()
4036 atomic_inc(&ctrl_info->num_interrupts); in pqi_irq_handler()
4047 struct pci_dev *pci_dev = ctrl_info->pci_dev; in pqi_request_irqs()
4051 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); in pqi_request_irqs()
4053 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { in pqi_request_irqs()
4055 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); in pqi_request_irqs()
4057 dev_err(&pci_dev->dev, in pqi_request_irqs()
4062 ctrl_info->num_msix_vectors_initialized++; in pqi_request_irqs()
4072 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) in pqi_free_irqs()
4073 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), in pqi_free_irqs()
4074 &ctrl_info->queue_groups[i]); in pqi_free_irqs()
4076 ctrl_info->num_msix_vectors_initialized = 0; in pqi_free_irqs()
4087 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, in pqi_enable_msix_interrupts()
4088 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, in pqi_enable_msix_interrupts()
4091 dev_err(&ctrl_info->pci_dev->dev, in pqi_enable_msix_interrupts()
4092 "MSI-X init failed with error %d\n", in pqi_enable_msix_interrupts()
4097 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; in pqi_enable_msix_interrupts()
4098 ctrl_info->irq_mode = IRQ_MODE_MSIX; in pqi_enable_msix_interrupts()
4104 if (ctrl_info->num_msix_vectors_enabled) { in pqi_disable_msix_interrupts()
4105 pci_free_irq_vectors(ctrl_info->pci_dev); in pqi_disable_msix_interrupts()
4106 ctrl_info->num_msix_vectors_enabled = 0; in pqi_disable_msix_interrupts()
4126 ctrl_info->num_elements_per_iq; in pqi_alloc_operational_queues()
4129 ctrl_info->num_elements_per_oq; in pqi_alloc_operational_queues()
4130 num_inbound_queues = ctrl_info->num_queue_groups * 2; in pqi_alloc_operational_queues()
4131 num_outbound_queues = ctrl_info->num_queue_groups; in pqi_alloc_operational_queues()
4132 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; in pqi_alloc_operational_queues()
4164 ctrl_info->queue_memory_base = in pqi_alloc_operational_queues()
4165 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_operational_queues()
4166 &ctrl_info->queue_memory_base_dma_handle, in pqi_alloc_operational_queues()
4169 if (!ctrl_info->queue_memory_base) in pqi_alloc_operational_queues()
4170 return -ENOMEM; in pqi_alloc_operational_queues()
4172 ctrl_info->queue_memory_length = alloc_length; in pqi_alloc_operational_queues()
4174 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, in pqi_alloc_operational_queues()
4177 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4178 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4179 queue_group->iq_element_array[RAID_PATH] = element_array; in pqi_alloc_operational_queues()
4180 queue_group->iq_element_array_bus_addr[RAID_PATH] = in pqi_alloc_operational_queues()
4181 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4182 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4186 queue_group->iq_element_array[AIO_PATH] = element_array; in pqi_alloc_operational_queues()
4187 queue_group->iq_element_array_bus_addr[AIO_PATH] = in pqi_alloc_operational_queues()
4188 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4189 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4195 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4196 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4197 queue_group->oq_element_array = element_array; in pqi_alloc_operational_queues()
4198 queue_group->oq_element_array_bus_addr = in pqi_alloc_operational_queues()
4199 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4200 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4206 ctrl_info->event_queue.oq_element_array = element_array; in pqi_alloc_operational_queues()
4207 ctrl_info->event_queue.oq_element_array_bus_addr = in pqi_alloc_operational_queues()
4208 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4209 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4216 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4217 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4218 queue_group->iq_ci[RAID_PATH] = next_queue_index; in pqi_alloc_operational_queues()
4219 queue_group->iq_ci_bus_addr[RAID_PATH] = in pqi_alloc_operational_queues()
4220 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4221 (next_queue_index - in pqi_alloc_operational_queues()
4222 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4226 queue_group->iq_ci[AIO_PATH] = next_queue_index; in pqi_alloc_operational_queues()
4227 queue_group->iq_ci_bus_addr[AIO_PATH] = in pqi_alloc_operational_queues()
4228 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4229 (next_queue_index - in pqi_alloc_operational_queues()
4230 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4234 queue_group->oq_pi = next_queue_index; in pqi_alloc_operational_queues()
4235 queue_group->oq_pi_bus_addr = in pqi_alloc_operational_queues()
4236 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4237 (next_queue_index - in pqi_alloc_operational_queues()
4238 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4244 ctrl_info->event_queue.oq_pi = next_queue_index; in pqi_alloc_operational_queues()
4245 ctrl_info->event_queue.oq_pi_bus_addr = in pqi_alloc_operational_queues()
4246 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4247 (next_queue_index - in pqi_alloc_operational_queues()
4248 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
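/*
 * The *_bus_addr assignments above all use one idiom: a single
 * dma_alloc_coherent() region is carved into element arrays and index
 * words, and each carve-out's bus address is the region's DMA handle
 * plus that carve-out's byte offset from the region's virtual base.
 * A hedged sketch of the offset arithmetic:
 */
#include <stdint.h>

typedef uint64_t dma_addr_sketch_t;

static dma_addr_sketch_t carveout_bus_addr_sketch(void *region_base,
	dma_addr_sketch_t region_dma_handle, void *carveout)
{
	/* the same byte offset applies on the CPU and the device side */
	return region_dma_handle + ((char *)carveout - (char *)region_base);
}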
4263 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
4264 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; in pqi_init_operational_queues()
4271 ctrl_info->event_queue.oq_id = next_oq_id++; in pqi_init_operational_queues()
4272 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4273 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; in pqi_init_operational_queues()
4274 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; in pqi_init_operational_queues()
4275 ctrl_info->queue_groups[i].oq_id = next_oq_id++; in pqi_init_operational_queues()
4279 * Assign MSI-X table entry indexes to all queues. Note that the in pqi_init_operational_queues()
4282 ctrl_info->event_queue.int_msg_num = 0; in pqi_init_operational_queues()
4283 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
4284 ctrl_info->queue_groups[i].int_msg_num = i; in pqi_init_operational_queues()
4286 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4287 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); in pqi_init_operational_queues()
4288 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); in pqi_init_operational_queues()
4289 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); in pqi_init_operational_queues()
4290 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); in pqi_init_operational_queues()
4303 ctrl_info->admin_queue_memory_base = in pqi_alloc_admin_queues()
4304 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_admin_queues()
4305 &ctrl_info->admin_queue_memory_base_dma_handle, in pqi_alloc_admin_queues()
4308 if (!ctrl_info->admin_queue_memory_base) in pqi_alloc_admin_queues()
4309 return -ENOMEM; in pqi_alloc_admin_queues()
4311 ctrl_info->admin_queue_memory_length = alloc_length; in pqi_alloc_admin_queues()
4313 admin_queues = &ctrl_info->admin_queues; in pqi_alloc_admin_queues()
4314 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, in pqi_alloc_admin_queues()
4316 admin_queues->iq_element_array = in pqi_alloc_admin_queues()
4317 &admin_queues_aligned->iq_element_array; in pqi_alloc_admin_queues()
4318 admin_queues->oq_element_array = in pqi_alloc_admin_queues()
4319 &admin_queues_aligned->oq_element_array; in pqi_alloc_admin_queues()
4320 admin_queues->iq_ci = in pqi_alloc_admin_queues()
4321 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; in pqi_alloc_admin_queues()
4322 admin_queues->oq_pi = in pqi_alloc_admin_queues()
4323 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; in pqi_alloc_admin_queues()
4325 admin_queues->iq_element_array_bus_addr = in pqi_alloc_admin_queues()
4326 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4327 (admin_queues->iq_element_array - in pqi_alloc_admin_queues()
4328 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4329 admin_queues->oq_element_array_bus_addr = in pqi_alloc_admin_queues()
4330 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4331 (admin_queues->oq_element_array - in pqi_alloc_admin_queues()
4332 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4333 admin_queues->iq_ci_bus_addr = in pqi_alloc_admin_queues()
4334 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4335 ((void __iomem *)admin_queues->iq_ci - in pqi_alloc_admin_queues()
4336 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4337 admin_queues->oq_pi_bus_addr = in pqi_alloc_admin_queues()
4338 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4339 ((void __iomem *)admin_queues->oq_pi - in pqi_alloc_admin_queues()
4340 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4356 pqi_registers = ctrl_info->pqi_registers; in pqi_create_admin_queues()
4357 admin_queues = &ctrl_info->admin_queues; in pqi_create_admin_queues()
4359 writeq((u64)admin_queues->iq_element_array_bus_addr, in pqi_create_admin_queues()
4360 &pqi_registers->admin_iq_element_array_addr); in pqi_create_admin_queues()
4361 writeq((u64)admin_queues->oq_element_array_bus_addr, in pqi_create_admin_queues()
4362 &pqi_registers->admin_oq_element_array_addr); in pqi_create_admin_queues()
4363 writeq((u64)admin_queues->iq_ci_bus_addr, in pqi_create_admin_queues()
4364 &pqi_registers->admin_iq_ci_addr); in pqi_create_admin_queues()
4365 writeq((u64)admin_queues->oq_pi_bus_addr, in pqi_create_admin_queues()
4366 &pqi_registers->admin_oq_pi_addr); in pqi_create_admin_queues()
4370 (admin_queues->int_msg_num << 16); in pqi_create_admin_queues()
4371 writel(reg, &pqi_registers->admin_iq_num_elements); in pqi_create_admin_queues()
4374 &pqi_registers->function_and_status_code); in pqi_create_admin_queues()
4379 status = readb(&pqi_registers->function_and_status_code); in pqi_create_admin_queues()
4383 return -ETIMEDOUT; in pqi_create_admin_queues()
4391 admin_queues->iq_pi = ctrl_info->iomem_base + in pqi_create_admin_queues()
4393 readq(&pqi_registers->admin_iq_pi_offset); in pqi_create_admin_queues()
4394 admin_queues->oq_ci = ctrl_info->iomem_base + in pqi_create_admin_queues()
4396 readq(&pqi_registers->admin_oq_ci_offset); in pqi_create_admin_queues()
4408 admin_queues = &ctrl_info->admin_queues; in pqi_submit_admin_request()
4409 iq_pi = admin_queues->iq_pi_copy; in pqi_submit_admin_request()
4411 next_element = admin_queues->iq_element_array + in pqi_submit_admin_request()
4417 admin_queues->iq_pi_copy = iq_pi; in pqi_submit_admin_request()
4423 writel(iq_pi, admin_queues->iq_pi); in pqi_submit_admin_request()
4436 admin_queues = &ctrl_info->admin_queues; in pqi_poll_for_admin_response()
4437 oq_ci = admin_queues->oq_ci_copy; in pqi_poll_for_admin_response()
4442 oq_pi = readl(admin_queues->oq_pi); in pqi_poll_for_admin_response()
4446 dev_err(&ctrl_info->pci_dev->dev, in pqi_poll_for_admin_response()
4448 return -ETIMEDOUT; in pqi_poll_for_admin_response()
4451 return -ENXIO; in pqi_poll_for_admin_response()
4455 memcpy(response, admin_queues->oq_element_array + in pqi_poll_for_admin_response()
4459 admin_queues->oq_ci_copy = oq_ci; in pqi_poll_for_admin_response()
4460 writel(oq_ci, admin_queues->oq_ci); in pqi_poll_for_admin_response()
4480 spin_lock_irqsave(&queue_group->submit_lock[path], flags); in pqi_start_io()
4483 io_request->queue_group = queue_group; in pqi_start_io()
4484 list_add_tail(&io_request->request_list_entry, in pqi_start_io()
4485 &queue_group->request_list[path]); in pqi_start_io()
4488 iq_pi = queue_group->iq_pi_copy[path]; in pqi_start_io()
4491 &queue_group->request_list[path], request_list_entry) { in pqi_start_io()
4493 request = io_request->iu; in pqi_start_io()
4495 iu_length = get_unaligned_le16(&request->iu_length) + in pqi_start_io()
4501 iq_ci = readl(queue_group->iq_ci[path]); in pqi_start_io()
4504 ctrl_info->num_elements_per_iq)) in pqi_start_io()
4507 put_unaligned_le16(queue_group->oq_id, in pqi_start_io()
4508 &request->response_queue_id); in pqi_start_io()
4510 next_element = queue_group->iq_element_array[path] + in pqi_start_io()
4514 ctrl_info->num_elements_per_iq - iq_pi; in pqi_start_io()
4522 memcpy(queue_group->iq_element_array[path], in pqi_start_io()
4524 iu_length - copy_count); in pqi_start_io()
4528 ctrl_info->num_elements_per_iq; in pqi_start_io()
4530 list_del(&io_request->request_list_entry); in pqi_start_io()
4533 if (iq_pi != queue_group->iq_pi_copy[path]) { in pqi_start_io()
4534 queue_group->iq_pi_copy[path] = iq_pi; in pqi_start_io()
4539 writel(iq_pi, queue_group->iq_pi[path]); in pqi_start_io()
4542 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); in pqi_start_io()
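/*
 * Sketch of the wrap-around copy performed above when an IU crosses the
 * end of the inbound queue's element array: copy what fits at the tail,
 * then the remainder at the head, and advance the producer index modulo
 * the ring size. Sizes are bytes here; the driver works in fixed-size
 * queue elements.
 */
#include <string.h>

static unsigned int ring_copy_sketch(char *ring, unsigned int ring_size,
	unsigned int pi, const char *iu, unsigned int iu_length)
{
	unsigned int copy_count = ring_size - pi;	/* room before the end */

	if (iu_length <= copy_count) {
		memcpy(ring + pi, iu, iu_length);
	} else {
		memcpy(ring + pi, iu, copy_count);			/* tail */
		memcpy(ring, iu + copy_count, iu_length - copy_count);	/* head */
	}

	return (pi + iu_length) % ring_size;	/* new producer index */
}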
4561 rc = -ENXIO; in pqi_wait_for_completion_io()
4580 int rc = -EIO; in pqi_process_raid_io_error_synchronous()
4582 switch (error_info->data_out_result) { in pqi_process_raid_io_error_synchronous()
4584 if (error_info->status == SAM_STAT_GOOD) in pqi_process_raid_io_error_synchronous()
4588 if (error_info->status == SAM_STAT_GOOD || in pqi_process_raid_io_error_synchronous()
4589 error_info->status == SAM_STAT_CHECK_CONDITION) in pqi_process_raid_io_error_synchronous()
4602 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; in pqi_is_blockable_request()
4615 if (down_interruptible(&ctrl_info->sync_request_sem)) in pqi_submit_raid_request_synchronous()
4616 return -ERESTARTSYS; in pqi_submit_raid_request_synchronous()
4618 down(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4630 rc = -ENXIO; in pqi_submit_raid_request_synchronous()
4636 put_unaligned_le16(io_request->index, in pqi_submit_raid_request_synchronous()
4637 &(((struct pqi_raid_path_request *)request)->request_id)); in pqi_submit_raid_request_synchronous()
4639 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) in pqi_submit_raid_request_synchronous()
4640 ((struct pqi_raid_path_request *)request)->error_index = in pqi_submit_raid_request_synchronous()
4641 ((struct pqi_raid_path_request *)request)->request_id; in pqi_submit_raid_request_synchronous()
4643 iu_length = get_unaligned_le16(&request->iu_length) + in pqi_submit_raid_request_synchronous()
4645 memcpy(io_request->iu, request, iu_length); in pqi_submit_raid_request_synchronous()
4647 io_request->io_complete_callback = pqi_raid_synchronous_complete; in pqi_submit_raid_request_synchronous()
4648 io_request->context = &wait; in pqi_submit_raid_request_synchronous()
4650 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_submit_raid_request_synchronous()
4656 if (io_request->error_info) in pqi_submit_raid_request_synchronous()
4657 memcpy(error_info, io_request->error_info, sizeof(*error_info)); in pqi_submit_raid_request_synchronous()
4660 } else if (rc == 0 && io_request->error_info) { in pqi_submit_raid_request_synchronous()
4661 rc = pqi_process_raid_io_error_synchronous(io_request->error_info); in pqi_submit_raid_request_synchronous()
4668 up(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
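/*
 * Sketch of the throttling above: synchronous RAID requests take a
 * counting semaphore before being issued, and callers that permit it
 * wait interruptibly so a signal aborts the request (the kernel's
 * down_interruptible() returning nonzero maps to -ERESTARTSYS).
 * Illustrative only; POSIX semaphores stand in for the kernel's.
 */
#include <errno.h>
#include <semaphore.h>
#include <stdbool.h>

#define ERESTARTSYS 512	/* kernel-private errno, defined here for the sketch */

static int acquire_sync_slot_sketch(sem_t *sem, bool interruptible)
{
	if (interruptible) {
		/* a signal aborts the wait and the request */
		if (sem_wait(sem) != 0 && errno == EINTR)
			return -ERESTARTSYS;
		return 0;
	}

	/* non-interruptible: keep waiting until a slot frees up */
	while (sem_wait(sem) != 0 && errno == EINTR)
		;
	return 0;
}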
4676 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) in pqi_validate_admin_response()
4677 return -EINVAL; in pqi_validate_admin_response()
4679 if (get_unaligned_le16(&response->header.iu_length) != in pqi_validate_admin_response()
4681 return -EINVAL; in pqi_validate_admin_response()
4683 if (response->function_code != expected_function_code) in pqi_validate_admin_response()
4684 return -EINVAL; in pqi_validate_admin_response()
4686 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) in pqi_validate_admin_response()
4687 return -EINVAL; in pqi_validate_admin_response()
4704 rc = pqi_validate_admin_response(response, request->function_code); in pqi_submit_admin_request_synchronous()
4719 return -ENOMEM; in pqi_report_device_capability()
4731 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_report_device_capability()
4740 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_report_device_capability()
4748 rc = -EIO; in pqi_report_device_capability()
4752 ctrl_info->max_inbound_queues = in pqi_report_device_capability()
4753 get_unaligned_le16(&capability->max_inbound_queues); in pqi_report_device_capability()
4754 ctrl_info->max_elements_per_iq = in pqi_report_device_capability()
4755 get_unaligned_le16(&capability->max_elements_per_iq); in pqi_report_device_capability()
4756 ctrl_info->max_iq_element_length = in pqi_report_device_capability()
4757 get_unaligned_le16(&capability->max_iq_element_length) in pqi_report_device_capability()
4759 ctrl_info->max_outbound_queues = in pqi_report_device_capability()
4760 get_unaligned_le16(&capability->max_outbound_queues); in pqi_report_device_capability()
4761 ctrl_info->max_elements_per_oq = in pqi_report_device_capability()
4762 get_unaligned_le16(&capability->max_elements_per_oq); in pqi_report_device_capability()
4763 ctrl_info->max_oq_element_length = in pqi_report_device_capability()
4764 get_unaligned_le16(&capability->max_oq_element_length) in pqi_report_device_capability()
4768 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; in pqi_report_device_capability()
4770 ctrl_info->max_inbound_iu_length_per_firmware = in pqi_report_device_capability()
4772 &sop_iu_layer_descriptor->max_inbound_iu_length); in pqi_report_device_capability()
4773 ctrl_info->inbound_spanning_supported = in pqi_report_device_capability()
4774 sop_iu_layer_descriptor->inbound_spanning_supported; in pqi_report_device_capability()
4775 ctrl_info->outbound_spanning_supported = in pqi_report_device_capability()
4776 sop_iu_layer_descriptor->outbound_spanning_supported; in pqi_report_device_capability()
4786 if (ctrl_info->max_iq_element_length < in pqi_validate_device_capability()
4788 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4790 ctrl_info->max_iq_element_length, in pqi_validate_device_capability()
4792 return -EINVAL; in pqi_validate_device_capability()
4795 if (ctrl_info->max_oq_element_length < in pqi_validate_device_capability()
4797 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4799 ctrl_info->max_oq_element_length, in pqi_validate_device_capability()
4801 return -EINVAL; in pqi_validate_device_capability()
4804 if (ctrl_info->max_inbound_iu_length_per_firmware < in pqi_validate_device_capability()
4806 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4808 ctrl_info->max_inbound_iu_length_per_firmware, in pqi_validate_device_capability()
4810 return -EINVAL; in pqi_validate_device_capability()
4813 if (!ctrl_info->inbound_spanning_supported) { in pqi_validate_device_capability()
4814 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4816 return -EINVAL; in pqi_validate_device_capability()
4819 if (ctrl_info->outbound_spanning_supported) { in pqi_validate_device_capability()
4820 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4822 return -EINVAL; in pqi_validate_device_capability()
4835 event_queue = &ctrl_info->event_queue; in pqi_create_event_queue()
4838 * Create OQ (Outbound Queue - device to host queue) to dedicate in pqi_create_event_queue()
4846 put_unaligned_le16(event_queue->oq_id, in pqi_create_event_queue()
4848 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, in pqi_create_event_queue()
4850 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, in pqi_create_event_queue()
4857 put_unaligned_le16(event_queue->int_msg_num, in pqi_create_event_queue()
4865 event_queue->oq_ci = ctrl_info->iomem_base + in pqi_create_event_queue()
4881 queue_group = &ctrl_info->queue_groups[group_number]; in pqi_create_queue_group()
4884 * Create IQ (Inbound Queue - host to device queue) for in pqi_create_queue_group()
4892 put_unaligned_le16(queue_group->iq_id[RAID_PATH], in pqi_create_queue_group()
4895 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], in pqi_create_queue_group()
4897 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], in pqi_create_queue_group()
4899 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4908 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4913 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4919 * Create IQ (Inbound Queue - host to device queue) for in pqi_create_queue_group()
4927 put_unaligned_le16(queue_group->iq_id[AIO_PATH], in pqi_create_queue_group()
4929 put_unaligned_le64((u64)queue_group-> in pqi_create_queue_group()
4932 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], in pqi_create_queue_group()
4934 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4943 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4948 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4963 put_unaligned_le16(queue_group->iq_id[AIO_PATH], in pqi_create_queue_group()
4971 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4977 * Create OQ (Outbound Queue - device to host queue). in pqi_create_queue_group()
4984 put_unaligned_le16(queue_group->oq_id, in pqi_create_queue_group()
4986 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, in pqi_create_queue_group()
4988 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, in pqi_create_queue_group()
4990 put_unaligned_le16(ctrl_info->num_elements_per_oq, in pqi_create_queue_group()
4995 put_unaligned_le16(queue_group->int_msg_num, in pqi_create_queue_group()
5001 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
5006 queue_group->oq_ci = ctrl_info->iomem_base + in pqi_create_queue_group()
5021 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
5026 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_create_queues()
5029 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
5031 i, ctrl_info->num_queue_groups); in pqi_create_queues()
5054 return -ENOMEM; in pqi_configure_events()
5060 data.report_event_configuration.sg_descriptors[1]) - in pqi_configure_events()
5065 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
5074 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
5081 for (i = 0; i < event_config->num_event_descriptors; i++) { in pqi_configure_events()
5082 event_descriptor = &event_config->descriptors[i]; in pqi_configure_events()
5084 pqi_is_supported_event(event_descriptor->event_type)) in pqi_configure_events()
5085 put_unaligned_le16(ctrl_info->event_queue.oq_id, in pqi_configure_events()
5086 &event_descriptor->oq_id); in pqi_configure_events()
5088 put_unaligned_le16(0, &event_descriptor->oq_id); in pqi_configure_events()
5095 data.report_event_configuration.sg_descriptors[1]) - in pqi_configure_events()
5100 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
5109 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
5131 if (!ctrl_info->io_request_pool) in pqi_free_all_io_requests()
5134 dev = &ctrl_info->pci_dev->dev; in pqi_free_all_io_requests()
5135 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_free_all_io_requests()
5136 io_request = ctrl_info->io_request_pool; in pqi_free_all_io_requests()
5138 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_free_all_io_requests()
5139 kfree(io_request->iu); in pqi_free_all_io_requests()
5140 if (!io_request->sg_chain_buffer) in pqi_free_all_io_requests()
5143 io_request->sg_chain_buffer, in pqi_free_all_io_requests()
5144 io_request->sg_chain_buffer_dma_handle); in pqi_free_all_io_requests()
5148 kfree(ctrl_info->io_request_pool); in pqi_free_all_io_requests()
5149 ctrl_info->io_request_pool = NULL; in pqi_free_all_io_requests()
5154 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, in pqi_alloc_error_buffer()
5155 ctrl_info->error_buffer_length, in pqi_alloc_error_buffer()
5156 &ctrl_info->error_buffer_dma_handle, in pqi_alloc_error_buffer()
5158 if (!ctrl_info->error_buffer) in pqi_alloc_error_buffer()
5159 return -ENOMEM; in pqi_alloc_error_buffer()
5173 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, in pqi_alloc_io_resources()
5174 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); in pqi_alloc_io_resources()
5176 if (!ctrl_info->io_request_pool) { in pqi_alloc_io_resources()
5177 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5182 dev = &ctrl_info->pci_dev->dev; in pqi_alloc_io_resources()
5183 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_alloc_io_resources()
5184 io_request = ctrl_info->io_request_pool; in pqi_alloc_io_resources()
5186 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_alloc_io_resources()
5187 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); in pqi_alloc_io_resources()
5189 if (!io_request->iu) { in pqi_alloc_io_resources()
5190 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5200 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5201 "failed to allocate PQI scatter-gather chain buffers\n"); in pqi_alloc_io_resources()
5205 io_request->index = i; in pqi_alloc_io_resources()
5206 io_request->sg_chain_buffer = sg_chain_buffer; in pqi_alloc_io_resources()
5207 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; in pqi_alloc_io_resources()
5216 return -ENOMEM; in pqi_alloc_io_resources()
5221 * requests and max. transfer size.
5229 ctrl_info->scsi_ml_can_queue = in pqi_calculate_io_resources()
5230 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; in pqi_calculate_io_resources()
5231 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; in pqi_calculate_io_resources()
5233 ctrl_info->error_buffer_length = in pqi_calculate_io_resources()
5234 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; in pqi_calculate_io_resources()
5237 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
5240 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
5245 /* +1 to cover when the buffer is not page-aligned. */ in pqi_calculate_io_resources()
5248 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); in pqi_calculate_io_resources()
5250 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; in pqi_calculate_io_resources()
5252 ctrl_info->sg_chain_buffer_length = in pqi_calculate_io_resources()
5255 ctrl_info->sg_tablesize = max_sg_entries; in pqi_calculate_io_resources()
5256 ctrl_info->max_sectors = max_transfer_size / 512; in pqi_calculate_io_resources()
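/*
 * Worked example of the sizing arithmetic above, assuming 4 KiB pages
 * and a 1 MiB controller transfer limit (numbers are illustrative):
 *
 *   max_sg_entries    = 1 MiB / 4 KiB + 1 = 257   (+1 covers a buffer
 *                                                  that is not page-aligned)
 *   max_transfer_size = (257 - 1) * 4 KiB = 1 MiB  (renormalized)
 *   max_sectors       = 1 MiB / 512 = 2048
 *
 * Had the controller's own SG limit been smaller, say 128 entries, the
 * transfer size would shrink to (128 - 1) * 4 KiB = 508 KiB instead.
 */
#include <stdint.h>

#define PAGE_SIZE_SKETCH 4096u

static void size_io_sketch(uint32_t ctrl_max_transfer, uint32_t ctrl_max_sg,
	uint32_t *max_sg_entries, uint32_t *max_transfer_size,
	uint32_t *max_sectors)
{
	uint32_t sg = ctrl_max_transfer / PAGE_SIZE_SKETCH + 1; /* +1: unaligned buffer */

	if (sg > ctrl_max_sg)
		sg = ctrl_max_sg;	/* controller SG limit binds instead */

	*max_sg_entries = sg;
	*max_transfer_size = (sg - 1) * PAGE_SIZE_SKETCH;
	*max_sectors = *max_transfer_size / 512;
}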
5271 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, in pqi_calculate_queue_resources()
5272 ctrl_info->max_outbound_queues - 1); in pqi_calculate_queue_resources()
5276 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); in pqi_calculate_queue_resources()
5280 ctrl_info->num_queue_groups = num_queue_groups; in pqi_calculate_queue_resources()
5286 ctrl_info->max_inbound_iu_length = in pqi_calculate_queue_resources()
5287 (ctrl_info->max_inbound_iu_length_per_firmware / in pqi_calculate_queue_resources()
5292 (ctrl_info->max_inbound_iu_length / in pqi_calculate_queue_resources()
5299 ctrl_info->max_elements_per_iq); in pqi_calculate_queue_resources()
5301 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; in pqi_calculate_queue_resources()
5303 ctrl_info->max_elements_per_oq); in pqi_calculate_queue_resources()
5305 ctrl_info->num_elements_per_iq = num_elements_per_iq; in pqi_calculate_queue_resources()
5306 ctrl_info->num_elements_per_oq = num_elements_per_oq; in pqi_calculate_queue_resources()
5308 ctrl_info->max_sg_per_iu = in pqi_calculate_queue_resources()
5309 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
5314 ctrl_info->max_sg_per_r56_iu = in pqi_calculate_queue_resources()
5315 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
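/*
 * The element-count relation above, num_elements_per_oq =
 * ((num_elements_per_iq - 1) * 2) + 1, presumably follows from each
 * queue group pairing two inbound queues (RAID path and AIO path) with
 * one shared outbound queue: with one slot reserved per IQ, the OQ must
 * absorb up to 2 * (num_elements_per_iq - 1) responses plus its own
 * reserved slot. For example:
 *
 *   num_elements_per_iq = 128  ->  num_elements_per_oq = 2 * 127 + 1 = 255
 */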
5327 put_unaligned_le64(address, &sg_descriptor->address); in pqi_set_sg_descriptor()
5328 put_unaligned_le32(length, &sg_descriptor->length); in pqi_set_sg_descriptor()
5329 put_unaligned_le32(0, &sg_descriptor->flags); in pqi_set_sg_descriptor()
5342 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ in pqi_build_sg_list()
5353 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, in pqi_build_sg_list()
5354 &sg_descriptor->address); in pqi_build_sg_list()
5355 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), in pqi_build_sg_list()
5356 &sg_descriptor->length); in pqi_build_sg_list()
5357 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); in pqi_build_sg_list()
5360 sg_descriptor = io_request->sg_chain_buffer; in pqi_build_sg_list()
5365 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); in pqi_build_sg_list()
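/*
 * Sketch of the chaining scheme above: if the scatter list fits in the
 * IU's embedded descriptors (minus one kept free for the chain marker),
 * it is emitted inline; otherwise the IU's last descriptor becomes a
 * chain pointer (CISS_SG_CHAIN) into a preallocated chain buffer that
 * holds the remaining entries, and the final descriptor is tagged
 * CISS_SG_LAST. Structure and flag values below are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct sg_desc_sketch {
	uint64_t address;
	uint32_t length;
	uint32_t flags;
};

#define SG_FLAG_CHAIN	0x80000000u	/* stand-in for CISS_SG_CHAIN */
#define SG_FLAG_LAST	0x40000000u	/* stand-in for CISS_SG_LAST */

/* returns the number of descriptors placed in the IU; assumes sg_count >= 1 */
static unsigned int build_sg_sketch(struct sg_desc_sketch *inline_sg,
	unsigned int max_inline, struct sg_desc_sketch *chain_buf,
	uint64_t chain_buf_dma, const struct sg_desc_sketch *src,
	unsigned int sg_count, bool *chained)
{
	unsigned int i;

	max_inline--;			/* leave room for the chain marker */
	*chained = sg_count > max_inline;

	if (!*chained) {
		for (i = 0; i < sg_count; i++)
			inline_sg[i] = src[i];
		inline_sg[sg_count - 1].flags |= SG_FLAG_LAST;
		return sg_count;
	}

	/* emit what fits inline, then one chain descriptor */
	for (i = 0; i < max_inline; i++)
		inline_sg[i] = src[i];
	inline_sg[max_inline].address = chain_buf_dma;
	inline_sg[max_inline].length =
		(sg_count - max_inline) * (uint32_t)sizeof(*chain_buf);
	inline_sg[max_inline].flags = SG_FLAG_CHAIN;

	/* remaining entries live in the chain buffer */
	for (i = max_inline; i < sg_count; i++)
		chain_buf[i - max_inline] = src[i];
	chain_buf[sg_count - max_inline - 1].flags |= SG_FLAG_LAST;

	return max_inline + 1;
}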
5385 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - in pqi_build_raid_sg_list()
5392 sg_descriptor = request->sg_descriptors; in pqi_build_raid_sg_list()
5395 ctrl_info->max_sg_per_iu, &chained); in pqi_build_raid_sg_list()
5397 request->partial = chained; in pqi_build_raid_sg_list()
5401 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_raid_sg_list()
5421 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - in pqi_build_aio_r1_sg_list()
5429 sg_descriptor = request->sg_descriptors; in pqi_build_aio_r1_sg_list()
5432 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_r1_sg_list()
5434 request->partial = chained; in pqi_build_aio_r1_sg_list()
5438 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_aio_r1_sg_list()
5439 request->num_sg_descriptors = num_sg_in_iu; in pqi_build_aio_r1_sg_list()
5459 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - in pqi_build_aio_r56_sg_list()
5465 sg_descriptor = request->sg_descriptors; in pqi_build_aio_r56_sg_list()
5468 ctrl_info->max_sg_per_r56_iu, &chained); in pqi_build_aio_r56_sg_list()
5470 request->partial = chained; in pqi_build_aio_r56_sg_list()
5474 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_aio_r56_sg_list()
5475 request->num_sg_descriptors = num_sg_in_iu; in pqi_build_aio_r56_sg_list()
5495 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - in pqi_build_aio_sg_list()
5503 sg_descriptor = request->sg_descriptors; in pqi_build_aio_sg_list()
5506 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_sg_list()
5508 request->partial = chained; in pqi_build_aio_sg_list()
5512 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_aio_sg_list()
5513 request->num_sg_descriptors = num_sg_in_iu; in pqi_build_aio_sg_list()
5523 scmd = io_request->scmd; in pqi_raid_io_complete()
5542 io_request->io_complete_callback = pqi_raid_io_complete; in pqi_raid_submit_io()
5543 io_request->scmd = scmd; in pqi_raid_submit_io()
5545 request = io_request->iu; in pqi_raid_submit_io()
5548 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; in pqi_raid_submit_io()
5549 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); in pqi_raid_submit_io()
5550 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_raid_submit_io()
5551 request->command_priority = io_high_prio; in pqi_raid_submit_io()
5552 put_unaligned_le16(io_request->index, &request->request_id); in pqi_raid_submit_io()
5553 request->error_index = request->request_id; in pqi_raid_submit_io()
5554 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); in pqi_raid_submit_io()
5555 request->ml_device_lun_number = (u8)scmd->device->lun; in pqi_raid_submit_io()
5557 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); in pqi_raid_submit_io()
5558 memcpy(request->cdb, scmd->cmnd, cdb_length); in pqi_raid_submit_io()
5565 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; in pqi_raid_submit_io()
5568 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; in pqi_raid_submit_io()
5571 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; in pqi_raid_submit_io()
5574 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; in pqi_raid_submit_io()
5578 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; in pqi_raid_submit_io()
5582 switch (scmd->sc_data_direction) { in pqi_raid_submit_io()
5584 request->data_direction = SOP_READ_FLAG; in pqi_raid_submit_io()
5587 request->data_direction = SOP_WRITE_FLAG; in pqi_raid_submit_io()
5590 request->data_direction = SOP_NO_DIRECTION_FLAG; in pqi_raid_submit_io()
5593 request->data_direction = SOP_BIDIRECTIONAL; in pqi_raid_submit_io()
5596 dev_err(&ctrl_info->pci_dev->dev, in pqi_raid_submit_io()
5598 scmd->sc_data_direction); in pqi_raid_submit_io()
5630 if (!io_request->raid_bypass) in pqi_raid_bypass_retry_needed()
5633 scmd = io_request->scmd; in pqi_raid_bypass_retry_needed()
5634 if ((scmd->result & 0xff) == SAM_STAT_GOOD) in pqi_raid_bypass_retry_needed()
5636 if (host_byte(scmd->result) == DID_NO_CONNECT) in pqi_raid_bypass_retry_needed()
5639 device = scmd->device->hostdata; in pqi_raid_bypass_retry_needed()
5643 ctrl_info = shost_to_hba(scmd->device->host); in pqi_raid_bypass_retry_needed()
5655 scmd = io_request->scmd; in pqi_aio_io_complete()
5657 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { in pqi_aio_io_complete()
5659 pqi_cmd_priv(scmd)->this_residual++; in pqi_aio_io_complete()
5674 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, in pqi_aio_submit_scsi_cmd()
5675 scmd->cmnd, scmd->cmd_len, queue_group, NULL, in pqi_aio_submit_scsi_cmd()
5693 io_request->io_complete_callback = pqi_aio_io_complete; in pqi_aio_submit_io()
5694 io_request->scmd = scmd; in pqi_aio_submit_io()
5695 io_request->raid_bypass = raid_bypass; in pqi_aio_submit_io()
5697 request = io_request->iu; in pqi_aio_submit_io()
5700 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; in pqi_aio_submit_io()
5701 put_unaligned_le32(aio_handle, &request->nexus_id); in pqi_aio_submit_io()
5702 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); in pqi_aio_submit_io()
5703 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_aio_submit_io()
5704 request->command_priority = io_high_prio; in pqi_aio_submit_io()
5705 put_unaligned_le16(io_request->index, &request->request_id); in pqi_aio_submit_io()
5706 request->error_index = request->request_id; in pqi_aio_submit_io()
5707 if (!raid_bypass && ctrl_info->multi_lun_device_supported) in pqi_aio_submit_io()
5708 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); in pqi_aio_submit_io()
5709 if (cdb_length > sizeof(request->cdb)) in pqi_aio_submit_io()
5710 cdb_length = sizeof(request->cdb); in pqi_aio_submit_io()
5711 request->cdb_length = cdb_length; in pqi_aio_submit_io()
5712 memcpy(request->cdb, cdb, cdb_length); in pqi_aio_submit_io()
5714 switch (scmd->sc_data_direction) { in pqi_aio_submit_io()
5716 request->data_direction = SOP_READ_FLAG; in pqi_aio_submit_io()
5719 request->data_direction = SOP_WRITE_FLAG; in pqi_aio_submit_io()
5722 request->data_direction = SOP_NO_DIRECTION_FLAG; in pqi_aio_submit_io()
5725 request->data_direction = SOP_BIDIRECTIONAL; in pqi_aio_submit_io()
5728 dev_err(&ctrl_info->pci_dev->dev, in pqi_aio_submit_io()
5730 scmd->sc_data_direction); in pqi_aio_submit_io()
5735 request->encryption_enable = true; in pqi_aio_submit_io()
5736 put_unaligned_le16(encryption_info->data_encryption_key_index, in pqi_aio_submit_io()
5737 &request->data_encryption_key_index); in pqi_aio_submit_io()
5738 put_unaligned_le32(encryption_info->encrypt_tweak_lower, in pqi_aio_submit_io()
5739 &request->encrypt_tweak_lower); in pqi_aio_submit_io()
5740 put_unaligned_le32(encryption_info->encrypt_tweak_upper, in pqi_aio_submit_io()
5741 &request->encrypt_tweak_upper); in pqi_aio_submit_io()
5768 io_request->io_complete_callback = pqi_aio_io_complete; in pqi_aio_submit_r1_write_io()
5769 io_request->scmd = scmd; in pqi_aio_submit_r1_write_io()
5770 io_request->raid_bypass = true; in pqi_aio_submit_r1_write_io()
5772 r1_request = io_request->iu; in pqi_aio_submit_r1_write_io()
5775 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; in pqi_aio_submit_r1_write_io()
5776 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); in pqi_aio_submit_r1_write_io()
5777 r1_request->num_drives = rmd->num_it_nexus_entries; in pqi_aio_submit_r1_write_io()
5778 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); in pqi_aio_submit_r1_write_io()
5779 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); in pqi_aio_submit_r1_write_io()
5780 if (rmd->num_it_nexus_entries == 3) in pqi_aio_submit_r1_write_io()
5781 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); in pqi_aio_submit_r1_write_io()
5783 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); in pqi_aio_submit_r1_write_io()
5784 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_aio_submit_r1_write_io()
5785 put_unaligned_le16(io_request->index, &r1_request->request_id); in pqi_aio_submit_r1_write_io()
5786 r1_request->error_index = r1_request->request_id; in pqi_aio_submit_r1_write_io()
5787 if (rmd->cdb_length > sizeof(r1_request->cdb)) in pqi_aio_submit_r1_write_io()
5788 rmd->cdb_length = sizeof(r1_request->cdb); in pqi_aio_submit_r1_write_io()
5789 r1_request->cdb_length = rmd->cdb_length; in pqi_aio_submit_r1_write_io()
5790 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); in pqi_aio_submit_r1_write_io()
5793 r1_request->data_direction = SOP_READ_FLAG; in pqi_aio_submit_r1_write_io()
5796 r1_request->encryption_enable = true; in pqi_aio_submit_r1_write_io()
5797 put_unaligned_le16(encryption_info->data_encryption_key_index, in pqi_aio_submit_r1_write_io()
5798 &r1_request->data_encryption_key_index); in pqi_aio_submit_r1_write_io()
5799 put_unaligned_le32(encryption_info->encrypt_tweak_lower, in pqi_aio_submit_r1_write_io()
5800 &r1_request->encrypt_tweak_lower); in pqi_aio_submit_r1_write_io()
5801 put_unaligned_le32(encryption_info->encrypt_tweak_upper, in pqi_aio_submit_r1_write_io()
5802 &r1_request->encrypt_tweak_upper); in pqi_aio_submit_r1_write_io()
5828 io_request->io_complete_callback = pqi_aio_io_complete; in pqi_aio_submit_r56_write_io()
5829 io_request->scmd = scmd; in pqi_aio_submit_r56_write_io()
5830 io_request->raid_bypass = true; in pqi_aio_submit_r56_write_io()
5832 r56_request = io_request->iu; in pqi_aio_submit_r56_write_io()
5835 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) in pqi_aio_submit_r56_write_io()
5836 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; in pqi_aio_submit_r56_write_io()
5838 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; in pqi_aio_submit_r56_write_io()
5840 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); in pqi_aio_submit_r56_write_io()
5841 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); in pqi_aio_submit_r56_write_io()
5842 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); in pqi_aio_submit_r56_write_io()
5843 if (rmd->raid_level == SA_RAID_6) { in pqi_aio_submit_r56_write_io()
5844 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); in pqi_aio_submit_r56_write_io()
5845 r56_request->xor_multiplier = rmd->xor_mult; in pqi_aio_submit_r56_write_io()
5847 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); in pqi_aio_submit_r56_write_io()
5848 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_aio_submit_r56_write_io()
5849 put_unaligned_le64(rmd->row, &r56_request->row); in pqi_aio_submit_r56_write_io()
5851 put_unaligned_le16(io_request->index, &r56_request->request_id); in pqi_aio_submit_r56_write_io()
5852 r56_request->error_index = r56_request->request_id; in pqi_aio_submit_r56_write_io()
5854 if (rmd->cdb_length > sizeof(r56_request->cdb)) in pqi_aio_submit_r56_write_io()
5855 rmd->cdb_length = sizeof(r56_request->cdb); in pqi_aio_submit_r56_write_io()
5856 r56_request->cdb_length = rmd->cdb_length; in pqi_aio_submit_r56_write_io()
5857 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); in pqi_aio_submit_r56_write_io()
5860 r56_request->data_direction = SOP_READ_FLAG; in pqi_aio_submit_r56_write_io()
5863 r56_request->encryption_enable = true; in pqi_aio_submit_r56_write_io()
5864 put_unaligned_le16(encryption_info->data_encryption_key_index, in pqi_aio_submit_r56_write_io()
5865 &r56_request->data_encryption_key_index); in pqi_aio_submit_r56_write_io()
5866 put_unaligned_le32(encryption_info->encrypt_tweak_lower, in pqi_aio_submit_r56_write_io()
5867 &r56_request->encrypt_tweak_lower); in pqi_aio_submit_r56_write_io()
5868 put_unaligned_le32(encryption_info->encrypt_tweak_upper, in pqi_aio_submit_r56_write_io()
5869 &r56_request->encrypt_tweak_upper); in pqi_aio_submit_r56_write_io()
5897 return pqi_cmd_priv(scmd)->this_residual == 0; in pqi_is_bypass_eligible_request()
5902 * back to the SML.
5910 if (!scmd->device) { in pqi_prep_for_scsi_done()
5915 device = scmd->device->hostdata; in pqi_prep_for_scsi_done()
5921 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); in pqi_prep_for_scsi_done()
5923 wait = (struct completion *)xchg(&scmd->host_scribble, NULL); in pqi_prep_for_scsi_done()
5939 if (!ctrl_info->enable_stream_detection) in pqi_is_parity_write_stream()
5950 device = scmd->device->hostdata; in pqi_is_parity_write_stream()
5953 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) in pqi_is_parity_write_stream()
5958 * requests down non-AIO path. in pqi_is_parity_write_stream()
5960 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || in pqi_is_parity_write_stream()
5961 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) in pqi_is_parity_write_stream()
5967 pqi_stream_data = &device->stream_data[i]; in pqi_is_parity_write_stream()
5972 if ((pqi_stream_data->next_lba && in pqi_is_parity_write_stream()
5973 rmd.first_block >= pqi_stream_data->next_lba) && in pqi_is_parity_write_stream()
5974 rmd.first_block <= pqi_stream_data->next_lba + in pqi_is_parity_write_stream()
5976 pqi_stream_data->next_lba = rmd.first_block + in pqi_is_parity_write_stream()
5978 pqi_stream_data->last_accessed = jiffies; in pqi_is_parity_write_stream()
5983 if (pqi_stream_data->last_accessed == 0) { in pqi_is_parity_write_stream()
5989 if (pqi_stream_data->last_accessed <= oldest_jiffies) { in pqi_is_parity_write_stream()
5990 oldest_jiffies = pqi_stream_data->last_accessed; in pqi_is_parity_write_stream()
5996 pqi_stream_data = &device->stream_data[lru_index]; in pqi_is_parity_write_stream()
5997 pqi_stream_data->last_accessed = jiffies; in pqi_is_parity_write_stream()
5998 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; in pqi_is_parity_write_stream()
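/*
 * Sketch of the stream tracker above: a small per-device array of
 * {next_lba, last_accessed} slots. A write whose starting LBA lands at
 * (or just past) a slot's predicted next_lba extends that stream, i.e.
 * a sequential parity write is detected; otherwise the least-recently-
 * used slot is recycled for a new candidate stream. Structure below is
 * illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stdint.h>

#define NUM_STREAMS_SKETCH 8

struct stream_sketch {
	uint64_t next_lba;	/* predicted start of the next write */
	uint64_t last_accessed;	/* stand-in for jiffies */
};

static bool is_sequential_write_sketch(struct stream_sketch *streams,
	uint64_t first_block, uint64_t block_cnt, uint64_t now)
{
	unsigned int i, lru = 0;
	uint64_t oldest = UINT64_MAX;

	for (i = 0; i < NUM_STREAMS_SKETCH; i++) {
		struct stream_sketch *s = &streams[i];

		/* does this request continue a tracked stream? */
		if (s->next_lba && first_block >= s->next_lba &&
		    first_block <= s->next_lba + block_cnt) {
			s->next_lba = first_block + block_cnt;
			s->last_accessed = now;
			return true;
		}
		/* remember the least-recently-used slot as we go */
		if (s->last_accessed <= oldest) {
			oldest = s->last_accessed;
			lru = i;
		}
	}

	/* no match: start tracking a new candidate stream in the LRU slot */
	streams[lru].last_accessed = now;
	streams[lru].next_lba = first_block + block_cnt;
	return false;
}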
6013 scmd->host_scribble = PQI_NO_COMPLETION; in pqi_scsi_queue_command()
6015 device = scmd->device->hostdata; in pqi_scsi_queue_command()
6023 lun = (u8)scmd->device->lun; in pqi_scsi_queue_command()
6025 atomic_inc(&device->scsi_cmds_outstanding[lun]); in pqi_scsi_queue_command()
6041 * This is necessary because the SML doesn't zero out this field during in pqi_scsi_queue_command()
6044 scmd->result = 0; in pqi_scsi_queue_command()
6047 queue_group = &ctrl_info->queue_groups[hw_queue]; in pqi_scsi_queue_command()
6051 if (device->raid_bypass_enabled && in pqi_scsi_queue_command()
6057 device->raid_bypass_cnt++; in pqi_scsi_queue_command()
6063 if (device->aio_enabled) in pqi_scsi_queue_command()
6071 scmd->host_scribble = NULL; in pqi_scsi_queue_command()
6072 atomic_dec(&device->scsi_cmds_outstanding[lun]); in pqi_scsi_queue_command()
6089 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_queued_io_count()
6090 queue_group = &ctrl_info->queue_groups[i]; in pqi_queued_io_count()
6092 spin_lock_irqsave(&queue_group->submit_lock[path], flags); in pqi_queued_io_count()
6093 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) in pqi_queued_io_count()
6095 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); in pqi_queued_io_count()
6113 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_nonempty_inbound_queue_count()
6114 queue_group = &ctrl_info->queue_groups[i]; in pqi_nonempty_inbound_queue_count()
6116 iq_pi = queue_group->iq_pi_copy[path]; in pqi_nonempty_inbound_queue_count()
6117 iq_ci = readl(queue_group->iq_ci[path]); in pqi_nonempty_inbound_queue_count()
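/*
 * Editor's note: an illustrative sketch (not driver code) of the
 * producer/consumer index comparison above. An inbound queue is non-empty
 * while the producer index (iq_pi) differs from the consumer index (iq_ci);
 * both are assumed to stay in [0, num_elements), as on a PQI ring.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool sketch_ring_nonempty(uint32_t iq_pi, uint32_t iq_ci)
{
	return iq_pi != iq_ci;	/* equal indices mean the queue is drained */
}

static inline uint32_t sketch_ring_used(uint32_t iq_pi, uint32_t iq_ci,
	uint32_t num_elements)
{
	/* wrap-safe element count for indices that wrap at num_elements */
	return (iq_pi + num_elements - iq_ci) % num_elements;
}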
6147 return -ENXIO; in pqi_wait_until_inbound_queues_empty()
6149 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_until_inbound_queues_empty()
6150 …"waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: … in pqi_wait_until_inbound_queues_empty()
6151 … jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); in pqi_wait_until_inbound_queues_empty()
6159 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_until_inbound_queues_empty()
6161 jiffies_to_msecs(jiffies - start_jiffies) / 1000); in pqi_wait_until_inbound_queues_empty()
6178 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_fail_io_queued_for_device()
6179 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_device()
6183 &queue_group->submit_lock[path], flags); in pqi_fail_io_queued_for_device()
6186 &queue_group->request_list[path], in pqi_fail_io_queued_for_device()
6189 scmd = io_request->scmd; in pqi_fail_io_queued_for_device()
6193 scsi_device = scmd->device->hostdata; in pqi_fail_io_queued_for_device()
6197 if ((u8)scmd->device->lun != lun) in pqi_fail_io_queued_for_device()
6200 list_del(&io_request->request_list_entry); in pqi_fail_io_queued_for_device()
6208 &queue_group->submit_lock[path], flags); in pqi_fail_io_queued_for_device()
6226 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { in pqi_device_wait_for_pending_io()
6227 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { in pqi_device_wait_for_pending_io()
6230 return -ENXIO; in pqi_device_wait_for_pending_io()
6232 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); in pqi_device_wait_for_pending_io()
6234 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
6236 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
6238 return -ETIMEDOUT; in pqi_device_wait_for_pending_io()
6241 dev_warn(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
6243 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
6281 rc = -ENXIO; in pqi_wait_for_lun_reset_completion()
6286 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); in pqi_wait_for_lun_reset_completion()
6287 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_for_lun_reset_completion()
6289 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); in pqi_wait_for_lun_reset_completion()
6305 io_request->io_complete_callback = pqi_lun_reset_complete; in pqi_lun_reset()
6306 io_request->context = &wait; in pqi_lun_reset()
6308 request = io_request->iu; in pqi_lun_reset()
6311 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; in pqi_lun_reset()
6312 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, in pqi_lun_reset()
6313 &request->header.iu_length); in pqi_lun_reset()
6314 put_unaligned_le16(io_request->index, &request->request_id); in pqi_lun_reset()
6315 memcpy(request->lun_number, device->scsi3addr, in pqi_lun_reset()
6316 sizeof(request->lun_number)); in pqi_lun_reset()
6317 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) in pqi_lun_reset()
6318 request->ml_device_lun_number = lun; in pqi_lun_reset()
6319 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; in pqi_lun_reset()
6320 if (ctrl_info->tmf_iu_timeout_supported) in pqi_lun_reset()
6321 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); in pqi_lun_reset()
6323 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_lun_reset()
6328 rc = io_request->status; in pqi_lun_reset()
 6349 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) in pqi_lun_reset_with_retries()
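/*
 * Editor's note: a minimal sketch (not driver code) of the retry policy on
 * the line above: a LUN reset is final on success, on "device gone" errors
 * (-ENODEV/-ENXIO), or once the retry budget is spent. do_reset() and the
 * retry count are hypothetical stand-ins.
 */
#include <errno.h>

#define SKETCH_RESET_RETRIES 3

static int sketch_reset_with_retries(int (*do_reset)(void *ctx), void *ctx)
{
	unsigned int retries = 0;
	int rc;

	for (;;) {
		rc = do_reset(ctx);
		if (rc == 0 || rc == -ENODEV || rc == -ENXIO ||
				++retries > SKETCH_RESET_RETRIES)
			break;
	}

	return rc;
}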
6387 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_device_reset_handler()
6389 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_reset_handler()
6391 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); in pqi_device_reset_handler()
6399 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_reset_handler()
6401 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, in pqi_device_reset_handler()
6404 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_device_reset_handler()
6416 shost = scmd->device->host; in pqi_eh_device_reset_handler()
6418 device = scmd->device->hostdata; in pqi_eh_device_reset_handler()
6419 scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; in pqi_eh_device_reset_handler()
6421 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); in pqi_eh_device_reset_handler()
6430 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); in pqi_tmf_worker()
 6432 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); in pqi_tmf_worker()
6443 shost = scmd->device->host; in pqi_eh_abort_handler()
6445 device = scmd->device->hostdata; in pqi_eh_abort_handler()
6447 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6449 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); in pqi_eh_abort_handler()
6451 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { in pqi_eh_abort_handler()
6452 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6454 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); in pqi_eh_abort_handler()
6455 scmd->result = DID_RESET << 16; in pqi_eh_abort_handler()
6459 tmf_work = &device->tmf_work[scmd->device->lun]; in pqi_eh_abort_handler()
6461 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { in pqi_eh_abort_handler()
6462 tmf_work->ctrl_info = ctrl_info; in pqi_eh_abort_handler()
6463 tmf_work->device = device; in pqi_eh_abort_handler()
6464 tmf_work->lun = (u8)scmd->device->lun; in pqi_eh_abort_handler()
6465 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; in pqi_eh_abort_handler()
6466 schedule_work(&tmf_work->work_struct); in pqi_eh_abort_handler()
6471 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6473 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); in pqi_eh_abort_handler()
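/*
 * Editor's note: an illustrative C11 sketch (not driver code) of the
 * lock-free handoff above: the abort handler publishes the command with
 * cmpxchg() (only if the slot is free) and the worker claims it with
 * xchg(), so exactly one side ever owns the pointer. Names are
 * hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct sketch_tmf_work {
	_Atomic(void *) cmd;	/* NULL = slot free */
};

/* submitter side: returns true if the command was handed to the worker */
static bool sketch_submit(struct sketch_tmf_work *work, void *cmd)
{
	void *expected = NULL;

	return atomic_compare_exchange_strong(&work->cmd, &expected, cmd);
}

/* worker side: returns the command, or NULL if nothing was published */
static void *sketch_claim(struct sketch_tmf_work *work)
{
	return atomic_exchange(&work->cmd, NULL);
}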
6488 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_alloc()
6490 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6497 if (device->target_lun_valid) { in pqi_slave_alloc()
6498 device->ignore_device = true; in pqi_slave_alloc()
6500 device->target = sdev_id(sdev); in pqi_slave_alloc()
6501 device->lun = sdev->lun; in pqi_slave_alloc()
6502 device->target_lun_valid = true; in pqi_slave_alloc()
6507 sdev_id(sdev), sdev->lun); in pqi_slave_alloc()
6511 sdev->hostdata = device; in pqi_slave_alloc()
6512 device->sdev = sdev; in pqi_slave_alloc()
6513 if (device->queue_depth) { in pqi_slave_alloc()
6514 device->advertised_queue_depth = device->queue_depth; in pqi_slave_alloc()
6516 device->advertised_queue_depth); in pqi_slave_alloc()
6521 sdev->allow_restart = 1; in pqi_slave_alloc()
6522 if (device->device_type == SA_DEVICE_TYPE_NVME) in pqi_slave_alloc()
6527 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6536 if (!ctrl_info->disable_managed_interrupts) in pqi_map_queues()
6537 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], in pqi_map_queues()
6538 ctrl_info->pci_dev, 0); in pqi_map_queues()
6540 return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); in pqi_map_queues()
6545 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; in pqi_is_tape_changer_device()
6553 device = sdev->hostdata; in pqi_slave_configure()
6554 device->devtype = sdev->type; in pqi_slave_configure()
6556 if (pqi_is_tape_changer_device(device) && device->ignore_device) { in pqi_slave_configure()
6557 rc = -ENXIO; in pqi_slave_configure()
6558 device->ignore_device = false; in pqi_slave_configure()
6571 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_destroy()
6573 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6577 device = sdev->hostdata; in pqi_slave_destroy()
6579 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6583 device->lun_count--; in pqi_slave_destroy()
6584 if (device->lun_count > 0) { in pqi_slave_destroy()
6585 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6589 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6590 list_del(&device->scsi_device_list_entry); in pqi_slave_destroy()
6591 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6593 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6607 return -EINVAL; in pqi_getpciinfo_ioctl()
6609 pci_dev = ctrl_info->pci_dev; in pqi_getpciinfo_ioctl()
6611 pci_info.domain = pci_domain_nr(pci_dev->bus); in pqi_getpciinfo_ioctl()
6612 pci_info.bus = pci_dev->bus->number; in pqi_getpciinfo_ioctl()
6613 pci_info.dev_fn = pci_dev->devfn; in pqi_getpciinfo_ioctl()
6614 subsystem_vendor = pci_dev->subsystem_vendor; in pqi_getpciinfo_ioctl()
6615 subsystem_device = pci_dev->subsystem_device; in pqi_getpciinfo_ioctl()
6619 return -EFAULT; in pqi_getpciinfo_ioctl()
6629 return -EINVAL; in pqi_getdrivver_ioctl()
6635 return -EFAULT; in pqi_getdrivver_ioctl()
6652 switch (pqi_error_info->data_out_result) { in pqi_error_info_to_ciss()
6696 get_unaligned_le16(&pqi_error_info->sense_data_length); in pqi_error_info_to_ciss()
6699 get_unaligned_le16(&pqi_error_info->response_data_length); in pqi_error_info_to_ciss()
6701 if (sense_data_length > sizeof(pqi_error_info->data)) in pqi_error_info_to_ciss()
6702 sense_data_length = sizeof(pqi_error_info->data); in pqi_error_info_to_ciss()
6704 ciss_error_info->scsi_status = pqi_error_info->status; in pqi_error_info_to_ciss()
6705 ciss_error_info->command_status = ciss_cmd_status; in pqi_error_info_to_ciss()
6706 ciss_error_info->sense_data_length = sense_data_length; in pqi_error_info_to_ciss()
6721 return -ENXIO; in pqi_passthru_ioctl()
6723 return -EBUSY; in pqi_passthru_ioctl()
6725 return -EINVAL; in pqi_passthru_ioctl()
6727 return -EPERM; in pqi_passthru_ioctl()
6729 return -EFAULT; in pqi_passthru_ioctl()
6732 return -EINVAL; in pqi_passthru_ioctl()
6734 return -EINVAL; in pqi_passthru_ioctl()
6736 return -EINVAL; in pqi_passthru_ioctl()
6745 return -EINVAL; in pqi_passthru_ioctl()
6751 return -ENOMEM; in pqi_passthru_ioctl()
6755 rc = -EFAULT; in pqi_passthru_ioctl()
6766 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - in pqi_passthru_ioctl()
6793 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_passthru_ioctl()
6804 if (ctrl_info->raid_iu_timeout_supported) in pqi_passthru_ioctl()
6811 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, in pqi_passthru_ioctl()
6834 rc = -EFAULT; in pqi_passthru_ioctl()
6842 rc = -EFAULT; in pqi_passthru_ioctl()
6858 ctrl_info = shost_to_hba(sdev->host); in pqi_ioctl()
6876 rc = -EINVAL; in pqi_ioctl()
6892 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); in pqi_firmware_version_show()
6910 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); in pqi_serial_number_show()
6922 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); in pqi_model_show()
6934 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); in pqi_vendor_show()
6955 count += scnprintf(buffer + count, PAGE_SIZE - count, in pqi_lockup_action_show()
6958 count += scnprintf(buffer + count, PAGE_SIZE - count, in pqi_lockup_action_show()
6962 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); in pqi_lockup_action_show()
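/*
 * Editor's note: an illustrative userspace sketch (not driver code) of the
 * scnprintf() accumulation above. Unlike snprintf(), scnprintf() returns
 * the number of bytes actually written, so advancing by the return value
 * can never push "buffer + count" past the page. A portable clamp:
 */
#include <stdarg.h>
#include <stdio.h>

static int sketch_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;

	/* report only what was stored, excluding the terminating NUL */
	return (size_t)n < size ? n : (int)(size - 1);
}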
6984 return -EINVAL; in pqi_lockup_action_store()
6994 ctrl_info->enable_stream_detection); in pqi_host_enable_stream_detection_show()
7005 return -EINVAL; in pqi_host_enable_stream_detection_store()
7010 ctrl_info->enable_stream_detection = set_stream_detection; in pqi_host_enable_stream_detection_store()
7021 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); in pqi_host_enable_r5_writes_show()
7032 return -EINVAL; in pqi_host_enable_r5_writes_store()
7037 ctrl_info->enable_r5_writes = set_r5_writes; in pqi_host_enable_r5_writes_store()
7048 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); in pqi_host_enable_r6_writes_show()
7059 return -EINVAL; in pqi_host_enable_r6_writes_store()
7064 ctrl_info->enable_r6_writes = set_r6_writes; in pqi_host_enable_r6_writes_store()
7111 ctrl_info = shost_to_hba(sdev->host); in pqi_unique_id_show()
7114 return -ENODEV; in pqi_unique_id_show()
7116 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7118 device = sdev->hostdata; in pqi_unique_id_show()
7120 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7121 return -ENODEV; in pqi_unique_id_show()
7124 if (device->is_physical_device) in pqi_unique_id_show()
7125 memcpy(unique_id, device->wwid, sizeof(device->wwid)); in pqi_unique_id_show()
7127 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); in pqi_unique_id_show()
7129 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7150 ctrl_info = shost_to_hba(sdev->host); in pqi_lunid_show()
7153 return -ENODEV; in pqi_lunid_show()
7155 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7157 device = sdev->hostdata; in pqi_lunid_show()
7159 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7160 return -ENODEV; in pqi_lunid_show()
7163 memcpy(lunid, device->scsi3addr, sizeof(lunid)); in pqi_lunid_show()
7165 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7188 ctrl_info = shost_to_hba(sdev->host); in pqi_path_info_show()
7191 return -ENODEV; in pqi_path_info_show()
7193 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7195 device = sdev->hostdata; in pqi_path_info_show()
7197 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7198 return -ENODEV; in pqi_path_info_show()
7201 bay = device->bay; in pqi_path_info_show()
7204 if (i == device->active_path_index) in pqi_path_info_show()
7206 else if (device->path_map & path_map_index) in pqi_path_info_show()
7212 PAGE_SIZE - output_len, in pqi_path_info_show()
7214 ctrl_info->scsi_host->host_no, in pqi_path_info_show()
7215 device->bus, device->target, in pqi_path_info_show()
7216 device->lun, in pqi_path_info_show()
7217 scsi_device_type(device->devtype)); in pqi_path_info_show()
7219 if (device->devtype == TYPE_RAID || in pqi_path_info_show()
7223 memcpy(&phys_connector, &device->phys_connector[i], in pqi_path_info_show()
7231 PAGE_SIZE - output_len, in pqi_path_info_show()
7234 box = device->box[i]; in pqi_path_info_show()
7237 PAGE_SIZE - output_len, in pqi_path_info_show()
7240 if ((device->devtype == TYPE_DISK || in pqi_path_info_show()
7241 device->devtype == TYPE_ZBC) && in pqi_path_info_show()
7244 PAGE_SIZE - output_len, in pqi_path_info_show()
7249 PAGE_SIZE - output_len, in pqi_path_info_show()
7253 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7268 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_address_show()
7271 return -ENODEV; in pqi_sas_address_show()
7273 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7275 device = sdev->hostdata; in pqi_sas_address_show()
7277 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7278 return -ENODEV; in pqi_sas_address_show()
7281 sas_address = device->sas_address; in pqi_sas_address_show()
7283 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7297 ctrl_info = shost_to_hba(sdev->host); in pqi_ssd_smart_path_enabled_show()
7300 return -ENODEV; in pqi_ssd_smart_path_enabled_show()
7302 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7304 device = sdev->hostdata; in pqi_ssd_smart_path_enabled_show()
7306 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7307 return -ENODEV; in pqi_ssd_smart_path_enabled_show()
7310 buffer[0] = device->raid_bypass_enabled ? '1' : '0'; in pqi_ssd_smart_path_enabled_show()
7314 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7329 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_level_show()
7332 return -ENODEV; in pqi_raid_level_show()
7334 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7336 device = sdev->hostdata; in pqi_raid_level_show()
7338 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7339 return -ENODEV; in pqi_raid_level_show()
7342 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) in pqi_raid_level_show()
7343 raid_level = pqi_raid_level_to_string(device->raid_level); in pqi_raid_level_show()
7347 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7362 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_bypass_cnt_show()
7365 return -ENODEV; in pqi_raid_bypass_cnt_show()
7367 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7369 device = sdev->hostdata; in pqi_raid_bypass_cnt_show()
7371 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7372 return -ENODEV; in pqi_raid_bypass_cnt_show()
7375 raid_bypass_cnt = device->raid_bypass_cnt; in pqi_raid_bypass_cnt_show()
7377 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7392 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_ncq_prio_enable_show()
7395 return -ENODEV; in pqi_sas_ncq_prio_enable_show()
7397 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7399 device = sdev->hostdata; in pqi_sas_ncq_prio_enable_show()
7401 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7402 return -ENODEV; in pqi_sas_ncq_prio_enable_show()
7406 device->ncq_prio_enable); in pqi_sas_ncq_prio_enable_show()
7407 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7423 return -EINVAL; in pqi_sas_ncq_prio_enable_store()
7426 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_ncq_prio_enable_store()
7428 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7430 device = sdev->hostdata; in pqi_sas_ncq_prio_enable_store()
7433 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7434 return -ENODEV; in pqi_sas_ncq_prio_enable_store()
7437 if (!device->ncq_prio_support) { in pqi_sas_ncq_prio_enable_store()
7438 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7439 return -EINVAL; in pqi_sas_ncq_prio_enable_store()
7442 device->ncq_prio_enable = ncq_prio_enable; in pqi_sas_ncq_prio_enable_store()
7444 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7456 ctrl_info = shost_to_hba(sdev->host); in pqi_numa_node_show()
7458 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); in pqi_numa_node_show()
7494 .this_id = -1,
7514 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); in pqi_register_scsi()
7515 return -ENOMEM; in pqi_register_scsi()
7518 shost->io_port = 0; in pqi_register_scsi()
7519 shost->n_io_port = 0; in pqi_register_scsi()
7520 shost->this_id = -1; in pqi_register_scsi()
7521 shost->max_channel = PQI_MAX_BUS; in pqi_register_scsi()
7522 shost->max_cmd_len = MAX_COMMAND_SIZE; in pqi_register_scsi()
7523 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; in pqi_register_scsi()
7524 shost->max_id = ~0; in pqi_register_scsi()
7525 shost->max_sectors = ctrl_info->max_sectors; in pqi_register_scsi()
7526 shost->can_queue = ctrl_info->scsi_ml_can_queue; in pqi_register_scsi()
7527 shost->cmd_per_lun = shost->can_queue; in pqi_register_scsi()
7528 shost->sg_tablesize = ctrl_info->sg_tablesize; in pqi_register_scsi()
7529 shost->transportt = pqi_sas_transport_template; in pqi_register_scsi()
7530 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); in pqi_register_scsi()
7531 shost->unique_id = shost->irq; in pqi_register_scsi()
7532 shost->nr_hw_queues = ctrl_info->num_queue_groups; in pqi_register_scsi()
7533 shost->host_tagset = 1; in pqi_register_scsi()
7534 shost->hostdata[0] = (unsigned long)ctrl_info; in pqi_register_scsi()
7536 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); in pqi_register_scsi()
7538 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); in pqi_register_scsi()
7544 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); in pqi_register_scsi()
7548 ctrl_info->scsi_host = shost; in pqi_register_scsi()
7566 shost = ctrl_info->scsi_host; in pqi_unregister_scsi()
7582 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_reset_completion()
7583 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; in pqi_wait_for_pqi_reset_completion()
7588 reset_reg.all_bits = readl(&pqi_registers->device_reset); in pqi_wait_for_pqi_reset_completion()
7592 rc = -ENXIO; in pqi_wait_for_pqi_reset_completion()
7596 rc = -ETIMEDOUT; in pqi_wait_for_pqi_reset_completion()
7609 if (ctrl_info->pqi_reset_quiesce_supported) { in pqi_reset()
7612 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7622 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); in pqi_reset()
7626 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7639 return -ENOMEM; in pqi_get_ctrl_serial_number()
7645 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, in pqi_get_ctrl_serial_number()
7646 sizeof(sense_info->ctrl_serial_number)); in pqi_get_ctrl_serial_number()
7647 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; in pqi_get_ctrl_serial_number()
7662 return -ENOMEM; in pqi_get_ctrl_product_details()
7668 if (get_unaligned_le32(&identify->extra_controller_flags) & in pqi_get_ctrl_product_details()
7670 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7671 identify->firmware_version_long, in pqi_get_ctrl_product_details()
7672 sizeof(identify->firmware_version_long)); in pqi_get_ctrl_product_details()
7674 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7675 identify->firmware_version_short, in pqi_get_ctrl_product_details()
7676 sizeof(identify->firmware_version_short)); in pqi_get_ctrl_product_details()
7677 ctrl_info->firmware_version in pqi_get_ctrl_product_details()
7678 [sizeof(identify->firmware_version_short)] = '\0'; in pqi_get_ctrl_product_details()
7679 snprintf(ctrl_info->firmware_version + in pqi_get_ctrl_product_details()
7680 strlen(ctrl_info->firmware_version), in pqi_get_ctrl_product_details()
7681 sizeof(ctrl_info->firmware_version) - in pqi_get_ctrl_product_details()
7682 sizeof(identify->firmware_version_short), in pqi_get_ctrl_product_details()
7683 "-%u", in pqi_get_ctrl_product_details()
7684 get_unaligned_le16(&identify->firmware_build_number)); in pqi_get_ctrl_product_details()
7687 memcpy(ctrl_info->model, identify->product_id, in pqi_get_ctrl_product_details()
7688 sizeof(identify->product_id)); in pqi_get_ctrl_product_details()
7689 ctrl_info->model[sizeof(identify->product_id)] = '\0'; in pqi_get_ctrl_product_details()
7691 memcpy(ctrl_info->vendor, identify->vendor_id, in pqi_get_ctrl_product_details()
7692 sizeof(identify->vendor_id)); in pqi_get_ctrl_product_details()
7693 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; in pqi_get_ctrl_product_details()
7695 dev_info(&ctrl_info->pci_dev->dev, in pqi_get_ctrl_product_details()
7696 "Firmware version: %s\n", ctrl_info->firmware_version); in pqi_get_ctrl_product_details()
7719 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) in pqi_is_firmware_feature_supported()
7722 return firmware_features->features_supported[byte_index] & in pqi_is_firmware_feature_supported()
7735 (le16_to_cpu(firmware_features->num_elements) * 2); in pqi_is_firmware_feature_enabled()
7752 le16_to_cpu(firmware_features->num_elements); in pqi_request_firmware_feature()
7754 firmware_features->features_supported[byte_index] |= in pqi_request_firmware_feature()
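/*
 * Editor's note: a minimal sketch (not driver code) of the feature-bitmap
 * indexing used above: features are one bit each in a byte array, so
 * feature N lives in byte N / 8 under mask 1 << (N % 8), and a byte index
 * beyond the reported array length means the firmware predates the
 * feature. sketch_* names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool sketch_feature_bit_set(const uint8_t *bitmap, size_t num_bytes,
	unsigned int feature_bit)
{
	size_t byte_index = feature_bit / 8;

	if (byte_index >= num_bytes)
		return false;	/* firmware never reported this bit */

	return bitmap[byte_index] & (1u << (feature_bit % 8));
}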
7766 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, in pqi_config_table_update()
7786 features_requested = firmware_features->features_supported + in pqi_enable_firmware_features()
7787 le16_to_cpu(firmware_features->num_elements); in pqi_enable_firmware_features()
7790 (features_requested - (void *)firmware_features); in pqi_enable_firmware_features()
7793 le16_to_cpu(firmware_features->num_elements)); in pqi_enable_firmware_features()
7799 (le16_to_cpu(firmware_features->num_elements) * 2) + in pqi_enable_firmware_features()
7822 if (!firmware_feature->supported) { in pqi_firmware_feature_status()
7823 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", in pqi_firmware_feature_status()
7824 firmware_feature->feature_name); in pqi_firmware_feature_status()
7828 if (firmware_feature->enabled) { in pqi_firmware_feature_status()
7829 dev_info(&ctrl_info->pci_dev->dev, in pqi_firmware_feature_status()
7830 "%s enabled\n", firmware_feature->feature_name); in pqi_firmware_feature_status()
7834 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", in pqi_firmware_feature_status()
7835 firmware_feature->feature_name); in pqi_firmware_feature_status()
7841 switch (firmware_feature->feature_bit) { in pqi_ctrl_update_feature_flags()
7843 ctrl_info->enable_r1_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7846 ctrl_info->enable_r5_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7849 ctrl_info->enable_r6_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7852 ctrl_info->soft_reset_handshake_supported = in pqi_ctrl_update_feature_flags()
7853 firmware_feature->enabled && in pqi_ctrl_update_feature_flags()
7857 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7860 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7863 ctrl_info->firmware_triage_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7864 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); in pqi_ctrl_update_feature_flags()
7867 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7870 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7880 if (firmware_feature->feature_status) in pqi_firmware_feature_update()
7881 firmware_feature->feature_status(ctrl_info, firmware_feature); in pqi_firmware_feature_update()
7973 .feature_name = "Multi-LUN Target",
7989 ctrl_info = section_info->ctrl_info; in pqi_process_firmware_features()
7990 firmware_features = section_info->section; in pqi_process_firmware_features()
7991 firmware_features_iomem_addr = section_info->section_iomem_addr; in pqi_process_firmware_features()
8018 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_firmware_features()
8068 ctrl_info->heartbeat_counter = NULL; in pqi_ctrl_reset_config()
8069 ctrl_info->soft_reset_status = NULL; in pqi_ctrl_reset_config()
8070 ctrl_info->soft_reset_handshake_supported = false; in pqi_ctrl_reset_config()
8071 ctrl_info->enable_r1_writes = false; in pqi_ctrl_reset_config()
8072 ctrl_info->enable_r5_writes = false; in pqi_ctrl_reset_config()
8073 ctrl_info->enable_r6_writes = false; in pqi_ctrl_reset_config()
8074 ctrl_info->raid_iu_timeout_supported = false; in pqi_ctrl_reset_config()
8075 ctrl_info->tmf_iu_timeout_supported = false; in pqi_ctrl_reset_config()
8076 ctrl_info->firmware_triage_supported = false; in pqi_ctrl_reset_config()
8077 ctrl_info->rpl_extended_format_4_5_supported = false; in pqi_ctrl_reset_config()
8078 ctrl_info->multi_lun_device_supported = false; in pqi_ctrl_reset_config()
8092 table_length = ctrl_info->config_table_length; in pqi_process_config_table()
8098 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
8100 return -ENOMEM; in pqi_process_config_table()
8107 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; in pqi_process_config_table()
8112 section_offset = get_unaligned_le32(&config_table->first_section_offset); in pqi_process_config_table()
 8121 switch (get_unaligned_le16(&section->section_id)) { in pqi_process_config_table()
8128 dev_warn(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
8131 ctrl_info->heartbeat_counter = in pqi_process_config_table()
8138 ctrl_info->soft_reset_status = in pqi_process_config_table()
 8146 section_offset = get_unaligned_le16(&section->next_section_offset); in pqi_process_config_table()
8174 dev_err(&ctrl_info->pci_dev->dev, in pqi_revert_to_sis_mode()
8175 "re-enabling SIS mode failed with error %d\n", rc); in pqi_revert_to_sis_mode()
8191 return -ENXIO; in pqi_force_sis_mode()
8245 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8259 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8266 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8272 ctrl_info->product_id = (u8)product_id; in pqi_ctrl_init()
8273 ctrl_info->product_revision = (u8)(product_id >> 8); in pqi_ctrl_init()
8276 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
8278 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
8281 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
8283 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
8291 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8303 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8308 /* Wait for the controller to complete the SIS -> PQI transition. */ in pqi_ctrl_init()
8311 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8317 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init()
8322 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8329 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8336 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8351 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { in pqi_ctrl_init()
8352 ctrl_info->max_msix_vectors = in pqi_ctrl_init()
8353 ctrl_info->num_msix_vectors_enabled; in pqi_ctrl_init()
8363 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8380 ctrl_info->controller_online = true; in pqi_ctrl_init()
8388 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init()
8391 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8395 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init()
8401 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8413 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8420 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8427 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8428 "error enabling multi-lun rescan\n"); in pqi_ctrl_init()
8434 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8452 admin_queues = &ctrl_info->admin_queues; in pqi_reinit_queues()
8453 admin_queues->iq_pi_copy = 0; in pqi_reinit_queues()
8454 admin_queues->oq_ci_copy = 0; in pqi_reinit_queues()
8455 writel(0, admin_queues->oq_pi); in pqi_reinit_queues()
8457 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_reinit_queues()
8458 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; in pqi_reinit_queues()
8459 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; in pqi_reinit_queues()
8460 ctrl_info->queue_groups[i].oq_ci_copy = 0; in pqi_reinit_queues()
8462 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); in pqi_reinit_queues()
8463 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); in pqi_reinit_queues()
8464 writel(0, ctrl_info->queue_groups[i].oq_pi); in pqi_reinit_queues()
8467 event_queue = &ctrl_info->event_queue; in pqi_reinit_queues()
8468 writel(0, event_queue->oq_pi); in pqi_reinit_queues()
8469 event_queue->oq_ci_copy = 0; in pqi_reinit_queues()
8494 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8501 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8513 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8518 /* Wait for the controller to complete the SIS -> PQI transition. */ in pqi_ctrl_init_resume()
8521 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8527 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init_resume()
8534 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8545 ctrl_info->controller_online = true; in pqi_ctrl_init_resume()
8556 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init_resume()
8559 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8563 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init_resume()
8569 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8576 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8583 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8584 "error enabling multi-lun rescan\n"); in pqi_ctrl_init_resume()
8590 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8618 rc = pci_enable_device(ctrl_info->pci_dev); in pqi_pci_init()
8620 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8630 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); in pqi_pci_init()
8632 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); in pqi_pci_init()
8636 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); in pqi_pci_init()
8638 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8643 ctrl_info->iomem_base = ioremap(pci_resource_start( in pqi_pci_init()
8644 ctrl_info->pci_dev, 0), in pqi_pci_init()
8645 pci_resource_len(ctrl_info->pci_dev, 0)); in pqi_pci_init()
8646 if (!ctrl_info->iomem_base) { in pqi_pci_init()
8647 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8649 rc = -ENOMEM; in pqi_pci_init()
8656 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, in pqi_pci_init()
8659 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8665 pci_set_master(ctrl_info->pci_dev); in pqi_pci_init()
8667 ctrl_info->registers = ctrl_info->iomem_base; in pqi_pci_init()
8668 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; in pqi_pci_init()
8670 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); in pqi_pci_init()
8675 pci_release_regions(ctrl_info->pci_dev); in pqi_pci_init()
8677 pci_disable_device(ctrl_info->pci_dev); in pqi_pci_init()
8684 iounmap(ctrl_info->iomem_base); in pqi_cleanup_pci_init()
8685 pci_release_regions(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8686 if (pci_is_enabled(ctrl_info->pci_dev)) in pqi_cleanup_pci_init()
8687 pci_disable_device(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8688 pci_set_drvdata(ctrl_info->pci_dev, NULL); in pqi_cleanup_pci_init()
8700 mutex_init(&ctrl_info->scan_mutex); in pqi_alloc_ctrl_info()
8701 mutex_init(&ctrl_info->lun_reset_mutex); in pqi_alloc_ctrl_info()
8702 mutex_init(&ctrl_info->ofa_mutex); in pqi_alloc_ctrl_info()
8704 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); in pqi_alloc_ctrl_info()
8705 spin_lock_init(&ctrl_info->scsi_device_list_lock); in pqi_alloc_ctrl_info()
8707 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); in pqi_alloc_ctrl_info()
8708 atomic_set(&ctrl_info->num_interrupts, 0); in pqi_alloc_ctrl_info()
8710 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); in pqi_alloc_ctrl_info()
8711 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); in pqi_alloc_ctrl_info()
8713 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); in pqi_alloc_ctrl_info()
8714 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); in pqi_alloc_ctrl_info()
8716 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); in pqi_alloc_ctrl_info()
8717 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); in pqi_alloc_ctrl_info()
8719 sema_init(&ctrl_info->sync_request_sem, in pqi_alloc_ctrl_info()
8721 init_waitqueue_head(&ctrl_info->block_requests_wait); in pqi_alloc_ctrl_info()
8723 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; in pqi_alloc_ctrl_info()
8724 ctrl_info->irq_mode = IRQ_MODE_NONE; in pqi_alloc_ctrl_info()
8725 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; in pqi_alloc_ctrl_info()
8727 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; in pqi_alloc_ctrl_info()
8728 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_alloc_ctrl_info()
8730 ctrl_info->max_transfer_encrypted_nvme = in pqi_alloc_ctrl_info()
8732 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; in pqi_alloc_ctrl_info()
8733 ctrl_info->max_write_raid_1_10_2drive = ~0; in pqi_alloc_ctrl_info()
8734 ctrl_info->max_write_raid_1_10_3drive = ~0; in pqi_alloc_ctrl_info()
8735 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; in pqi_alloc_ctrl_info()
8754 if (ctrl_info->queue_memory_base) in pqi_free_ctrl_resources()
8755 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8756 ctrl_info->queue_memory_length, in pqi_free_ctrl_resources()
8757 ctrl_info->queue_memory_base, in pqi_free_ctrl_resources()
8758 ctrl_info->queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8759 if (ctrl_info->admin_queue_memory_base) in pqi_free_ctrl_resources()
8760 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8761 ctrl_info->admin_queue_memory_length, in pqi_free_ctrl_resources()
8762 ctrl_info->admin_queue_memory_base, in pqi_free_ctrl_resources()
8763 ctrl_info->admin_queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8765 if (ctrl_info->error_buffer) in pqi_free_ctrl_resources()
8766 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8767 ctrl_info->error_buffer_length, in pqi_free_ctrl_resources()
8768 ctrl_info->error_buffer, in pqi_free_ctrl_resources()
8769 ctrl_info->error_buffer_dma_handle); in pqi_free_ctrl_resources()
8770 if (ctrl_info->iomem_base) in pqi_free_ctrl_resources()
8777 ctrl_info->controller_online = false; in pqi_remove_ctrl()
8782 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { in pqi_remove_ctrl()
8784 ctrl_info->pqi_mode_enabled = false; in pqi_remove_ctrl()
8787 if (ctrl_info->pqi_mode_enabled) in pqi_remove_ctrl()
8820 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_alloc_mem()
8826 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); in pqi_ofa_alloc_mem()
8827 if (!ctrl_info->pqi_ofa_chunk_virt_addr) in pqi_ofa_alloc_mem()
8830 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_alloc_mem()
8833 ctrl_info->pqi_ofa_chunk_virt_addr[i] = in pqi_ofa_alloc_mem()
8835 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) in pqi_ofa_alloc_mem()
8837 mem_descriptor = &ofap->sg_descriptor[i]; in pqi_ofa_alloc_mem()
8838 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); in pqi_ofa_alloc_mem()
8839 put_unaligned_le32(chunk_size, &mem_descriptor->length); in pqi_ofa_alloc_mem()
8842 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); in pqi_ofa_alloc_mem()
8843 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); in pqi_ofa_alloc_mem()
8844 put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); in pqi_ofa_alloc_mem()
8849 while (--i >= 0) { in pqi_ofa_alloc_mem()
8850 mem_descriptor = &ofap->sg_descriptor[i]; in pqi_ofa_alloc_mem()
8852 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_alloc_mem()
8853 get_unaligned_le64(&mem_descriptor->address)); in pqi_ofa_alloc_mem()
8855 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_alloc_mem()
8858 return -ENOMEM; in pqi_ofa_alloc_mem()
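/*
 * Editor's note: an illustrative userspace sketch (not driver code) of the
 * partial-allocation rollback above: on failure, "while (--i >= 0)" walks
 * back over exactly the chunks that were allocated. malloc() stands in for
 * the driver's DMA-coherent allocations; names are hypothetical.
 */
#include <stdlib.h>

static void **sketch_alloc_chunks(long count, size_t chunk_size)
{
	void **chunks;
	long i;

	chunks = calloc((size_t)count, sizeof(*chunks));
	if (!chunks)
		return NULL;

	for (i = 0; i < count; i++) {
		chunks[i] = malloc(chunk_size);
		if (!chunks[i])
			goto out_unwind;
	}

	return chunks;

out_unwind:
	while (--i >= 0)	/* free only what was successfully allocated */
		free(chunks[i]);
	free(chunks);
	return NULL;
}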
8867 if (ctrl_info->ofa_bytes_requested == 0) in pqi_ofa_alloc_host_buffer()
8870 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); in pqi_ofa_alloc_host_buffer()
8881 return -ENOMEM; in pqi_ofa_alloc_host_buffer()
8889 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_setup_host_buffer()
8892 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); in pqi_ofa_setup_host_buffer()
8896 ctrl_info->pqi_ofa_mem_virt_addr = ofap; in pqi_ofa_setup_host_buffer()
8901 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_setup_host_buffer()
8902 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_setup_host_buffer()
8906 put_unaligned_le16(PQI_OFA_VERSION, &ofap->version); in pqi_ofa_setup_host_buffer()
8907 memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); in pqi_ofa_setup_host_buffer()
8918 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_free_host_buffer()
8922 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_free_host_buffer()
8924 if (get_unaligned_le32(&ofap->bytes_allocated) == 0) in pqi_ofa_free_host_buffer()
8927 mem_descriptor = ofap->sg_descriptor; in pqi_ofa_free_host_buffer()
8929 get_unaligned_le16(&ofap->num_memory_descriptors); in pqi_ofa_free_host_buffer()
8934 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_free_host_buffer()
8937 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_free_host_buffer()
8941 ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_free_host_buffer()
8942 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_free_host_buffer()
8954 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, in pqi_ofa_host_memory_update()
8959 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_host_memory_update()
8963 get_unaligned_le16(&ofap->num_memory_descriptors) * in pqi_ofa_host_memory_update()
8966 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, in pqi_ofa_host_memory_update()
8994 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_fail_all_outstanding_requests()
8995 io_request = &ctrl_info->io_request_pool[i]; in pqi_fail_all_outstanding_requests()
8996 if (atomic_read(&io_request->refcount) == 0) in pqi_fail_all_outstanding_requests()
8999 scmd = io_request->scmd; in pqi_fail_all_outstanding_requests()
9001 sdev = scmd->device; in pqi_fail_all_outstanding_requests()
9009 io_request->status = -ENXIO; in pqi_fail_all_outstanding_requests()
9010 io_request->error_info = in pqi_fail_all_outstanding_requests()
9014 io_request->io_complete_callback(io_request, in pqi_fail_all_outstanding_requests()
9015 io_request->context); in pqi_fail_all_outstanding_requests()
9088 if (!ctrl_info->controller_online) in pqi_take_ctrl_offline()
9091 ctrl_info->controller_online = false; in pqi_take_ctrl_offline()
9092 ctrl_info->pqi_mode_enabled = false; in pqi_take_ctrl_offline()
9096 pci_disable_device(ctrl_info->pci_dev); in pqi_take_ctrl_offline()
9097 dev_err(&ctrl_info->pci_dev->dev, in pqi_take_ctrl_offline()
9100 schedule_work(&ctrl_info->ctrl_offline_work); in pqi_take_ctrl_offline()
9108 if (id->driver_data) in pqi_print_ctrl_info()
9109 ctrl_description = (char *)id->driver_data; in pqi_print_ctrl_info()
9113 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); in pqi_print_ctrl_info()
9126 id->subvendor == PCI_ANY_ID && in pqi_pci_probe()
9127 id->subdevice == PCI_ANY_ID) { in pqi_pci_probe()
9128 dev_warn(&pci_dev->dev, in pqi_pci_probe()
9130 return -ENODEV; in pqi_pci_probe()
9133 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) in pqi_pci_probe()
9134 dev_warn(&pci_dev->dev, in pqi_pci_probe()
9137 node = dev_to_node(&pci_dev->dev); in pqi_pci_probe()
9142 set_dev_node(&pci_dev->dev, node); in pqi_pci_probe()
9147 dev_err(&pci_dev->dev, in pqi_pci_probe()
9149 return -ENOMEM; in pqi_pci_probe()
9151 ctrl_info->numa_node = node; in pqi_pci_probe()
9153 ctrl_info->pci_dev = pci_dev; in pqi_pci_probe()
9181 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); in pqi_pci_remove()
9183 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; in pqi_pci_remove()
9185 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; in pqi_pci_remove()
9187 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { in pqi_pci_remove()
9190 dev_err(&pci_dev->dev, in pqi_pci_remove()
9203 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_crash_if_pending_command()
9204 io_request = &ctrl_info->io_request_pool[i]; in pqi_crash_if_pending_command()
9205 if (atomic_read(&io_request->refcount) == 0) in pqi_crash_if_pending_command()
9207 scmd = io_request->scmd; in pqi_crash_if_pending_command()
9208 WARN_ON(scmd != NULL); /* IO command from SML */ in pqi_crash_if_pending_command()
 9209 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */ in pqi_crash_if_pending_command()
9221 dev_err(&pci_dev->dev, in pqi_shutdown()
9239 * Write all data in the controller's battery-backed cache to in pqi_shutdown()
9244 dev_err(&pci_dev->dev, in pqi_shutdown()
9266 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", in pqi_process_lockup_action_param()
 9279 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", in pqi_process_ctrl_ready_timeout_param()
 9283 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", in pqi_process_ctrl_ready_timeout_param()
9301 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) in pqi_get_flush_cache_shutdown_event()
9334 ctrl_info->controller_online = false; in pqi_suspend_or_freeze()
9335 ctrl_info->pqi_mode_enabled = false; in pqi_suspend_or_freeze()
9386 ctrl_info->controller_online = true; in pqi_thaw()
9387 ctrl_info->pqi_mode_enabled = true; in pqi_thaw()
10305 return -ENODEV; in pqi_init()
10498 data.create_operational_iq) != 64 - 11); in pqi_verify_structures()
10500 data.create_operational_oq) != 64 - 11); in pqi_verify_structures()
10502 data.delete_operational_queue) != 64 - 11); in pqi_verify_structures()
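/*
 * Editor's note: an illustrative sketch (not driver code) of the layout
 * checks above. The "!= 64 - 11" assertions pin each admin-request payload
 * to the 64-byte IU minus its 11-byte header; C11 _Static_assert expresses
 * the same compile-time guarantee. The struct below is hypothetical.
 */
#include <stdint.h>

struct sketch_admin_request {
	uint8_t header[11];		/* assumed 11-byte IU header */
	uint8_t payload[64 - 11];	/* payload pads the IU to 64 bytes */
};

_Static_assert(sizeof(struct sketch_admin_request) == 64,
	"admin request IU must be exactly 64 bytes");
_Static_assert(sizeof(((struct sketch_admin_request *)0)->payload) == 64 - 11,
	"payload must be 64 - 11 bytes");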