1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * driver for Microchip PQI-based storage controllers 4 * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries 5 * Copyright (c) 2016-2018 Microsemi Corporation 6 * Copyright (c) 2016 PMC-Sierra, Inc. 7 * 8 * Questions/Comments/Bugfixes to storagedev@microchip.com 9 * 10 */ 11 12 #include <linux/module.h> 13 #include <linux/kernel.h> 14 #include <linux/pci.h> 15 #include <linux/delay.h> 16 #include <linux/interrupt.h> 17 #include <linux/sched.h> 18 #include <linux/rtc.h> 19 #include <linux/bcd.h> 20 #include <linux/reboot.h> 21 #include <linux/cciss_ioctl.h> 22 #include <linux/crash_dump.h> 23 #include <scsi/scsi_host.h> 24 #include <scsi/scsi_cmnd.h> 25 #include <scsi/scsi_device.h> 26 #include <scsi/scsi_eh.h> 27 #include <scsi/scsi_transport_sas.h> 28 #include <linux/unaligned.h> 29 #include "smartpqi.h" 30 #include "smartpqi_sis.h" 31 32 #if !defined(BUILD_TIMESTAMP) 33 #define BUILD_TIMESTAMP 34 #endif 35 36 #define DRIVER_VERSION "2.1.34-035" 37 #define DRIVER_MAJOR 2 38 #define DRIVER_MINOR 1 39 #define DRIVER_RELEASE 34 40 #define DRIVER_REVISION 35 41 42 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ 43 DRIVER_VERSION BUILD_TIMESTAMP ")" 44 #define DRIVER_NAME_SHORT "smartpqi" 45 46 #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) 47 48 #define PQI_POST_RESET_DELAY_SECS 5 49 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 50 51 #define PQI_NO_COMPLETION ((void *)-1) 52 53 MODULE_AUTHOR("Microchip"); 54 MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " 55 DRIVER_VERSION); 56 MODULE_VERSION(DRIVER_VERSION); 57 MODULE_LICENSE("GPL"); 58 59 struct pqi_cmd_priv { 60 int this_residual; 61 }; 62 63 static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd) 64 { 65 return scsi_cmd_priv(cmd); 66 } 67 68 static void pqi_verify_structures(void); 69 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 70 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); 71 static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info); 72 static void pqi_ctrl_offline_worker(struct work_struct *work); 73 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); 74 static void pqi_scan_start(struct Scsi_Host *shost); 75 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 76 struct pqi_queue_group *queue_group, enum pqi_io_path path, 77 struct pqi_io_request *io_request); 78 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 79 struct pqi_iu_header *request, unsigned int flags, 80 struct pqi_raid_error_info *error_info); 81 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 82 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 83 unsigned int cdb_length, struct pqi_queue_group *queue_group, 84 struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio); 85 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, 86 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 87 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 88 struct pqi_scsi_dev_raid_map_data *rmd); 89 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, 90 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 91 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 92 struct pqi_scsi_dev_raid_map_data *rmd); 93 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); 94 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info 
*ctrl_info); 95 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); 96 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size); 97 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor); 98 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code); 99 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 100 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); 101 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); 102 static void pqi_tmf_worker(struct work_struct *work); 103 104 /* for flags argument to pqi_submit_raid_request_synchronous() */ 105 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 106 107 static struct scsi_transport_template *pqi_sas_transport_template; 108 109 static atomic_t pqi_controller_count = ATOMIC_INIT(0); 110 111 enum pqi_lockup_action { 112 NONE, 113 REBOOT, 114 PANIC 115 }; 116 117 static enum pqi_lockup_action pqi_lockup_action = NONE; 118 119 static struct { 120 enum pqi_lockup_action action; 121 char *name; 122 } pqi_lockup_actions[] = { 123 { 124 .action = NONE, 125 .name = "none", 126 }, 127 { 128 .action = REBOOT, 129 .name = "reboot", 130 }, 131 { 132 .action = PANIC, 133 .name = "panic", 134 }, 135 }; 136 137 static unsigned int pqi_supported_event_types[] = { 138 PQI_EVENT_TYPE_HOTPLUG, 139 PQI_EVENT_TYPE_HARDWARE, 140 PQI_EVENT_TYPE_PHYSICAL_DEVICE, 141 PQI_EVENT_TYPE_LOGICAL_DEVICE, 142 PQI_EVENT_TYPE_OFA, 143 PQI_EVENT_TYPE_AIO_STATE_CHANGE, 144 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, 145 }; 146 147 static int pqi_disable_device_id_wildcards; 148 module_param_named(disable_device_id_wildcards, 149 pqi_disable_device_id_wildcards, int, 0644); 150 MODULE_PARM_DESC(disable_device_id_wildcards, 151 "Disable device ID wildcards."); 152 153 static int pqi_disable_heartbeat; 154 module_param_named(disable_heartbeat, 155 pqi_disable_heartbeat, int, 0644); 156 MODULE_PARM_DESC(disable_heartbeat, 157 "Disable heartbeat."); 158 159 static int pqi_disable_ctrl_shutdown; 160 module_param_named(disable_ctrl_shutdown, 161 pqi_disable_ctrl_shutdown, int, 0644); 162 MODULE_PARM_DESC(disable_ctrl_shutdown, 163 "Disable controller shutdown when controller locked up."); 164 165 static char *pqi_lockup_action_param; 166 module_param_named(lockup_action, 167 pqi_lockup_action_param, charp, 0644); 168 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" 169 "\t\tSupported: none, reboot, panic\n" 170 "\t\tDefault: none"); 171 172 static int pqi_expose_ld_first; 173 module_param_named(expose_ld_first, 174 pqi_expose_ld_first, int, 0644); 175 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives."); 176 177 static int pqi_hide_vsep; 178 module_param_named(hide_vsep, 179 pqi_hide_vsep, int, 0644); 180 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives."); 181 182 static int pqi_disable_managed_interrupts; 183 module_param_named(disable_managed_interrupts, 184 pqi_disable_managed_interrupts, int, 0644); 185 MODULE_PARM_DESC(disable_managed_interrupts, 186 "Disable the kernel automatically assigning SMP affinity to IRQs."); 187 188 static unsigned int pqi_ctrl_ready_timeout_secs; 189 module_param_named(ctrl_ready_timeout, 190 pqi_ctrl_ready_timeout_secs, uint, 
0644); 191 MODULE_PARM_DESC(ctrl_ready_timeout, 192 "Timeout in seconds for driver to wait for controller ready."); 193 194 static char *raid_levels[] = { 195 "RAID-0", 196 "RAID-4", 197 "RAID-1(1+0)", 198 "RAID-5", 199 "RAID-5+1", 200 "RAID-6", 201 "RAID-1(Triple)", 202 }; 203 204 static char *pqi_raid_level_to_string(u8 raid_level) 205 { 206 if (raid_level < ARRAY_SIZE(raid_levels)) 207 return raid_levels[raid_level]; 208 209 return "RAID UNKNOWN"; 210 } 211 212 #define SA_RAID_0 0 213 #define SA_RAID_4 1 214 #define SA_RAID_1 2 /* also used for RAID 10 */ 215 #define SA_RAID_5 3 /* also used for RAID 50 */ 216 #define SA_RAID_51 4 217 #define SA_RAID_6 5 /* also used for RAID 60 */ 218 #define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */ 219 #define SA_RAID_MAX SA_RAID_TRIPLE 220 #define SA_RAID_UNKNOWN 0xff 221 222 static inline void pqi_scsi_done(struct scsi_cmnd *scmd) 223 { 224 pqi_prep_for_scsi_done(scmd); 225 scsi_done(scmd); 226 } 227 228 static inline void pqi_disable_write_same(struct scsi_device *sdev) 229 { 230 sdev->no_write_same = 1; 231 } 232 233 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) 234 { 235 return memcmp(scsi3addr1, scsi3addr2, 8) == 0; 236 } 237 238 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) 239 { 240 return !device->is_physical_device; 241 } 242 243 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) 244 { 245 return scsi3addr[2] != 0; 246 } 247 248 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 249 { 250 return !ctrl_info->controller_online; 251 } 252 253 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) 254 { 255 if (ctrl_info->controller_online) 256 if (!sis_is_firmware_running(ctrl_info)) 257 pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP); 258 } 259 260 static inline bool pqi_is_hba_lunid(u8 *scsi3addr) 261 { 262 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); 263 } 264 265 #define PQI_DRIVER_SCRATCH_PQI_MODE 0x1 266 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2 267 268 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) 269 { 270 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? 
PQI_MODE : SIS_MODE; 271 } 272 273 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, 274 enum pqi_ctrl_mode mode) 275 { 276 u32 driver_scratch; 277 278 driver_scratch = sis_read_driver_scratch(ctrl_info); 279 280 if (mode == PQI_MODE) 281 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE; 282 else 283 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE; 284 285 sis_write_driver_scratch(ctrl_info, driver_scratch); 286 } 287 288 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) 289 { 290 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; 291 } 292 293 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) 294 { 295 u32 driver_scratch; 296 297 driver_scratch = sis_read_driver_scratch(ctrl_info); 298 299 if (is_supported) 300 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 301 else 302 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 303 304 sis_write_driver_scratch(ctrl_info, driver_scratch); 305 } 306 307 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) 308 { 309 ctrl_info->scan_blocked = true; 310 mutex_lock(&ctrl_info->scan_mutex); 311 } 312 313 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) 314 { 315 ctrl_info->scan_blocked = false; 316 mutex_unlock(&ctrl_info->scan_mutex); 317 } 318 319 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) 320 { 321 return ctrl_info->scan_blocked; 322 } 323 324 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) 325 { 326 mutex_lock(&ctrl_info->lun_reset_mutex); 327 } 328 329 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) 330 { 331 mutex_unlock(&ctrl_info->lun_reset_mutex); 332 } 333 334 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) 335 { 336 struct Scsi_Host *shost; 337 unsigned int num_loops; 338 int msecs_sleep; 339 340 shost = ctrl_info->scsi_host; 341 342 scsi_block_requests(shost); 343 344 num_loops = 0; 345 msecs_sleep = 20; 346 while (scsi_host_busy(shost)) { 347 num_loops++; 348 if (num_loops == 10) 349 msecs_sleep = 500; 350 msleep(msecs_sleep); 351 } 352 } 353 354 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) 355 { 356 scsi_unblock_requests(ctrl_info->scsi_host); 357 } 358 359 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) 360 { 361 atomic_inc(&ctrl_info->num_busy_threads); 362 } 363 364 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) 365 { 366 atomic_dec(&ctrl_info->num_busy_threads); 367 } 368 369 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 370 { 371 return ctrl_info->block_requests; 372 } 373 374 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) 375 { 376 ctrl_info->block_requests = true; 377 } 378 379 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) 380 { 381 ctrl_info->block_requests = false; 382 wake_up_all(&ctrl_info->block_requests_wait); 383 } 384 385 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 386 { 387 if (!pqi_ctrl_blocked(ctrl_info)) 388 return; 389 390 atomic_inc(&ctrl_info->num_blocked_threads); 391 wait_event(ctrl_info->block_requests_wait, 392 !pqi_ctrl_blocked(ctrl_info)); 393 atomic_dec(&ctrl_info->num_blocked_threads); 394 } 395 396 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10 397 398 static inline void pqi_ctrl_wait_until_quiesced(struct 
pqi_ctrl_info *ctrl_info) 399 { 400 unsigned long start_jiffies; 401 unsigned long warning_timeout; 402 bool displayed_warning; 403 404 displayed_warning = false; 405 start_jiffies = jiffies; 406 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 407 408 while (atomic_read(&ctrl_info->num_busy_threads) > 409 atomic_read(&ctrl_info->num_blocked_threads)) { 410 if (time_after(jiffies, warning_timeout)) { 411 dev_warn(&ctrl_info->pci_dev->dev, 412 "waiting %u seconds for driver activity to quiesce\n", 413 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 414 displayed_warning = true; 415 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies; 416 } 417 usleep_range(1000, 2000); 418 } 419 420 if (displayed_warning) 421 dev_warn(&ctrl_info->pci_dev->dev, 422 "driver activity quiesced after waiting for %u seconds\n", 423 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 424 } 425 426 static inline bool pqi_device_offline(struct pqi_scsi_dev *device) 427 { 428 return device->device_offline; 429 } 430 431 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) 432 { 433 mutex_lock(&ctrl_info->ofa_mutex); 434 } 435 436 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) 437 { 438 mutex_unlock(&ctrl_info->ofa_mutex); 439 } 440 441 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 442 { 443 mutex_lock(&ctrl_info->ofa_mutex); 444 mutex_unlock(&ctrl_info->ofa_mutex); 445 } 446 447 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) 448 { 449 return mutex_is_locked(&ctrl_info->ofa_mutex); 450 } 451 452 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) 453 { 454 device->in_remove = true; 455 } 456 457 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) 458 { 459 return device->in_remove; 460 } 461 462 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun) 463 { 464 device->in_reset[lun] = true; 465 } 466 467 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun) 468 { 469 device->in_reset[lun] = false; 470 } 471 472 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun) 473 { 474 return device->in_reset[lun]; 475 } 476 477 static inline int pqi_event_type_to_event_index(unsigned int event_type) 478 { 479 int index; 480 481 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 482 if (event_type == pqi_supported_event_types[index]) 483 return index; 484 485 return -1; 486 } 487 488 static inline bool pqi_is_supported_event(unsigned int event_type) 489 { 490 return pqi_event_type_to_event_index(event_type) != -1; 491 } 492 493 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, 494 unsigned long delay) 495 { 496 if (pqi_ctrl_offline(ctrl_info)) 497 return; 498 499 schedule_delayed_work(&ctrl_info->rescan_work, delay); 500 } 501 502 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) 503 { 504 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 505 } 506 507 #define PQI_RESCAN_WORK_DELAY (10 * HZ) 508 509 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) 510 { 511 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); 512 } 513 514 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) 515 { 516 cancel_delayed_work_sync(&ctrl_info->rescan_work); 517 } 518 519 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info 
*ctrl_info) 520 { 521 if (!ctrl_info->heartbeat_counter) 522 return 0; 523 524 return readl(ctrl_info->heartbeat_counter); 525 } 526 527 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 528 { 529 return readb(ctrl_info->soft_reset_status); 530 } 531 532 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 533 { 534 u8 status; 535 536 status = pqi_read_soft_reset_status(ctrl_info); 537 status &= ~PQI_SOFT_RESET_ABORT; 538 writeb(status, ctrl_info->soft_reset_status); 539 } 540 541 static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) 542 { 543 bool io_high_prio; 544 int priority_class; 545 546 io_high_prio = false; 547 548 if (device->ncq_prio_enable) { 549 priority_class = 550 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); 551 if (priority_class == IOPRIO_CLASS_RT) { 552 /* Set NCQ priority for read/write commands. */ 553 switch (scmd->cmnd[0]) { 554 case WRITE_16: 555 case READ_16: 556 case WRITE_12: 557 case READ_12: 558 case WRITE_10: 559 case READ_10: 560 case WRITE_6: 561 case READ_6: 562 io_high_prio = true; 563 break; 564 } 565 } 566 } 567 568 return io_high_prio; 569 } 570 571 static int pqi_map_single(struct pci_dev *pci_dev, 572 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 573 size_t buffer_length, enum dma_data_direction data_direction) 574 { 575 dma_addr_t bus_address; 576 577 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE) 578 return 0; 579 580 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, 581 data_direction); 582 if (dma_mapping_error(&pci_dev->dev, bus_address)) 583 return -ENOMEM; 584 585 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); 586 put_unaligned_le32(buffer_length, &sg_descriptor->length); 587 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 588 589 return 0; 590 } 591 592 static void pqi_pci_unmap(struct pci_dev *pci_dev, 593 struct pqi_sg_descriptor *descriptors, int num_descriptors, 594 enum dma_data_direction data_direction) 595 { 596 int i; 597 598 if (data_direction == DMA_NONE) 599 return; 600 601 for (i = 0; i < num_descriptors; i++) 602 dma_unmap_single(&pci_dev->dev, 603 (dma_addr_t)get_unaligned_le64(&descriptors[i].address), 604 get_unaligned_le32(&descriptors[i].length), 605 data_direction); 606 } 607 608 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, 609 struct pqi_raid_path_request *request, u8 cmd, 610 u8 *scsi3addr, void *buffer, size_t buffer_length, 611 u16 vpd_page, enum dma_data_direction *dir) 612 { 613 u8 *cdb; 614 size_t cdb_length = buffer_length; 615 616 memset(request, 0, sizeof(*request)); 617 618 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 619 put_unaligned_le16(offsetof(struct pqi_raid_path_request, 620 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, 621 &request->header.iu_length); 622 put_unaligned_le32(buffer_length, &request->buffer_length); 623 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); 624 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 625 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 626 627 cdb = request->cdb; 628 629 switch (cmd) { 630 case INQUIRY: 631 request->data_direction = SOP_READ_FLAG; 632 cdb[0] = INQUIRY; 633 if (vpd_page & VPD_PAGE) { 634 cdb[1] = 0x1; 635 cdb[2] = (u8)vpd_page; 636 } 637 cdb[4] = (u8)cdb_length; 638 break; 639 case CISS_REPORT_LOG: 640 case CISS_REPORT_PHYS: 641 request->data_direction = SOP_READ_FLAG; 642 cdb[0] = cmd; 643 if (cmd == 
CISS_REPORT_PHYS) { 644 if (ctrl_info->rpl_extended_format_4_5_supported) 645 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4; 646 else 647 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2; 648 } else { 649 cdb[1] = ctrl_info->ciss_report_log_flags; 650 } 651 put_unaligned_be32(cdb_length, &cdb[6]); 652 break; 653 case CISS_GET_RAID_MAP: 654 request->data_direction = SOP_READ_FLAG; 655 cdb[0] = CISS_READ; 656 cdb[1] = CISS_GET_RAID_MAP; 657 put_unaligned_be32(cdb_length, &cdb[6]); 658 break; 659 case SA_FLUSH_CACHE: 660 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; 661 request->data_direction = SOP_WRITE_FLAG; 662 cdb[0] = BMIC_WRITE; 663 cdb[6] = BMIC_FLUSH_CACHE; 664 put_unaligned_be16(cdb_length, &cdb[7]); 665 break; 666 case BMIC_SENSE_DIAG_OPTIONS: 667 cdb_length = 0; 668 fallthrough; 669 case BMIC_IDENTIFY_CONTROLLER: 670 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 671 case BMIC_SENSE_SUBSYSTEM_INFORMATION: 672 case BMIC_SENSE_FEATURE: 673 request->data_direction = SOP_READ_FLAG; 674 cdb[0] = BMIC_READ; 675 cdb[6] = cmd; 676 put_unaligned_be16(cdb_length, &cdb[7]); 677 break; 678 case BMIC_SET_DIAG_OPTIONS: 679 cdb_length = 0; 680 fallthrough; 681 case BMIC_WRITE_HOST_WELLNESS: 682 request->data_direction = SOP_WRITE_FLAG; 683 cdb[0] = BMIC_WRITE; 684 cdb[6] = cmd; 685 put_unaligned_be16(cdb_length, &cdb[7]); 686 break; 687 case BMIC_CSMI_PASSTHRU: 688 request->data_direction = SOP_BIDIRECTIONAL; 689 cdb[0] = BMIC_WRITE; 690 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU; 691 cdb[6] = cmd; 692 put_unaligned_be16(cdb_length, &cdb[7]); 693 break; 694 default: 695 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); 696 break; 697 } 698 699 switch (request->data_direction) { 700 case SOP_READ_FLAG: 701 *dir = DMA_FROM_DEVICE; 702 break; 703 case SOP_WRITE_FLAG: 704 *dir = DMA_TO_DEVICE; 705 break; 706 case SOP_NO_DIRECTION_FLAG: 707 *dir = DMA_NONE; 708 break; 709 default: 710 *dir = DMA_BIDIRECTIONAL; 711 break; 712 } 713 714 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], 715 buffer, buffer_length, *dir); 716 } 717 718 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 719 { 720 io_request->scmd = NULL; 721 io_request->status = 0; 722 io_request->error_info = NULL; 723 io_request->raid_bypass = false; 724 } 725 726 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) 727 { 728 struct pqi_io_request *io_request; 729 u16 i; 730 731 if (scmd) { /* SML I/O request */ 732 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 733 734 i = blk_mq_unique_tag_to_tag(blk_tag); 735 io_request = &ctrl_info->io_request_pool[i]; 736 if (atomic_inc_return(&io_request->refcount) > 1) { 737 atomic_dec(&io_request->refcount); 738 return NULL; 739 } 740 } else { /* IOCTL or driver internal request */ 741 /* 742 * benignly racy - may have to wait for an open slot. 
743 * command slot range is scsi_ml_can_queue - 744 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] 745 */ 746 i = 0; 747 while (1) { 748 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; 749 if (atomic_inc_return(&io_request->refcount) == 1) 750 break; 751 atomic_dec(&io_request->refcount); 752 i = (i + 1) % PQI_RESERVED_IO_SLOTS; 753 } 754 } 755 756 if (io_request) 757 pqi_reinit_io_request(io_request); 758 759 return io_request; 760 } 761 762 static void pqi_free_io_request(struct pqi_io_request *io_request) 763 { 764 atomic_dec(&io_request->refcount); 765 } 766 767 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, 768 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, 769 struct pqi_raid_error_info *error_info) 770 { 771 int rc; 772 struct pqi_raid_path_request request; 773 enum dma_data_direction dir; 774 775 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, 776 buffer, buffer_length, vpd_page, &dir); 777 if (rc) 778 return rc; 779 780 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); 781 782 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 783 784 return rc; 785 } 786 787 /* helper functions for pqi_send_scsi_raid_request */ 788 789 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, 790 u8 cmd, void *buffer, size_t buffer_length) 791 { 792 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 793 buffer, buffer_length, 0, NULL); 794 } 795 796 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, 797 u8 cmd, void *buffer, size_t buffer_length, 798 struct pqi_raid_error_info *error_info) 799 { 800 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 801 buffer, buffer_length, 0, error_info); 802 } 803 804 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 805 struct bmic_identify_controller *buffer) 806 { 807 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, 808 buffer, sizeof(*buffer)); 809 } 810 811 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, 812 struct bmic_sense_subsystem_info *sense_info) 813 { 814 return pqi_send_ctrl_raid_request(ctrl_info, 815 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, 816 sizeof(*sense_info)); 817 } 818 819 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 820 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 821 { 822 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, 823 buffer, buffer_length, vpd_page, NULL); 824 } 825 826 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 827 struct pqi_scsi_dev *device, 828 struct bmic_identify_physical_device *buffer, size_t buffer_length) 829 { 830 int rc; 831 enum dma_data_direction dir; 832 u16 bmic_device_index; 833 struct pqi_raid_path_request request; 834 835 rc = pqi_build_raid_path_request(ctrl_info, &request, 836 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 837 buffer_length, 0, &dir); 838 if (rc) 839 return rc; 840 841 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 842 request.cdb[2] = (u8)bmic_device_index; 843 request.cdb[9] = (u8)(bmic_device_index >> 8); 844 845 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 846 847 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 848 849 return rc; 850 } 851 852 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) 853 { 854 
u32 bytes; 855 856 bytes = get_unaligned_le16(limit); 857 if (bytes == 0) 858 bytes = ~0; 859 else 860 bytes *= 1024; 861 862 return bytes; 863 } 864 865 #pragma pack(1) 866 867 struct bmic_sense_feature_buffer { 868 struct bmic_sense_feature_buffer_header header; 869 struct bmic_sense_feature_io_page_aio_subpage aio_subpage; 870 }; 871 872 #pragma pack() 873 874 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ 875 offsetofend(struct bmic_sense_feature_buffer, \ 876 aio_subpage.max_write_raid_1_10_3drive) 877 878 #define MINIMUM_AIO_SUBPAGE_LENGTH \ 879 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ 880 max_write_raid_1_10_3drive) - \ 881 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header)) 882 883 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) 884 { 885 int rc; 886 enum dma_data_direction dir; 887 struct pqi_raid_path_request request; 888 struct bmic_sense_feature_buffer *buffer; 889 890 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); 891 if (!buffer) 892 return -ENOMEM; 893 894 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, 895 buffer, sizeof(*buffer), 0, &dir); 896 if (rc) 897 goto error; 898 899 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; 900 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; 901 902 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 903 904 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 905 906 if (rc) 907 goto error; 908 909 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || 910 buffer->header.subpage_code != 911 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 912 get_unaligned_le16(&buffer->header.buffer_length) < 913 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || 914 buffer->aio_subpage.header.page_code != 915 BMIC_SENSE_FEATURE_IO_PAGE || 916 buffer->aio_subpage.header.subpage_code != 917 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 918 get_unaligned_le16(&buffer->aio_subpage.header.page_length) < 919 MINIMUM_AIO_SUBPAGE_LENGTH) { 920 goto error; 921 } 922 923 ctrl_info->max_transfer_encrypted_sas_sata = 924 pqi_aio_limit_to_bytes( 925 &buffer->aio_subpage.max_transfer_encrypted_sas_sata); 926 927 ctrl_info->max_transfer_encrypted_nvme = 928 pqi_aio_limit_to_bytes( 929 &buffer->aio_subpage.max_transfer_encrypted_nvme); 930 931 ctrl_info->max_write_raid_5_6 = 932 pqi_aio_limit_to_bytes( 933 &buffer->aio_subpage.max_write_raid_5_6); 934 935 ctrl_info->max_write_raid_1_10_2drive = 936 pqi_aio_limit_to_bytes( 937 &buffer->aio_subpage.max_write_raid_1_10_2drive); 938 939 ctrl_info->max_write_raid_1_10_3drive = 940 pqi_aio_limit_to_bytes( 941 &buffer->aio_subpage.max_write_raid_1_10_3drive); 942 943 error: 944 kfree(buffer); 945 946 return rc; 947 } 948 949 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 950 enum bmic_flush_cache_shutdown_event shutdown_event) 951 { 952 int rc; 953 struct bmic_flush_cache *flush_cache; 954 955 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 956 if (!flush_cache) 957 return -ENOMEM; 958 959 flush_cache->shutdown_event = shutdown_event; 960 961 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, 962 sizeof(*flush_cache)); 963 964 kfree(flush_cache); 965 966 return rc; 967 } 968 969 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, 970 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, 971 struct pqi_raid_error_info *error_info) 972 { 973 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, 974 buffer, 
buffer_length, error_info); 975 } 976 977 #define PQI_FETCH_PTRAID_DATA (1 << 31) 978 979 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) 980 { 981 int rc; 982 struct bmic_diag_options *diag; 983 984 diag = kzalloc(sizeof(*diag), GFP_KERNEL); 985 if (!diag) 986 return -ENOMEM; 987 988 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, 989 diag, sizeof(*diag)); 990 if (rc) 991 goto out; 992 993 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); 994 995 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, 996 sizeof(*diag)); 997 998 out: 999 kfree(diag); 1000 1001 return rc; 1002 } 1003 1004 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 1005 void *buffer, size_t buffer_length) 1006 { 1007 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, 1008 buffer, buffer_length); 1009 } 1010 1011 #pragma pack(1) 1012 1013 struct bmic_host_wellness_driver_version { 1014 u8 start_tag[4]; 1015 u8 driver_version_tag[2]; 1016 __le16 driver_version_length; 1017 char driver_version[32]; 1018 u8 dont_write_tag[2]; 1019 u8 end_tag[2]; 1020 }; 1021 1022 #pragma pack() 1023 1024 static int pqi_write_driver_version_to_host_wellness( 1025 struct pqi_ctrl_info *ctrl_info) 1026 { 1027 int rc; 1028 struct bmic_host_wellness_driver_version *buffer; 1029 size_t buffer_length; 1030 1031 buffer_length = sizeof(*buffer); 1032 1033 buffer = kmalloc(buffer_length, GFP_KERNEL); 1034 if (!buffer) 1035 return -ENOMEM; 1036 1037 buffer->start_tag[0] = '<'; 1038 buffer->start_tag[1] = 'H'; 1039 buffer->start_tag[2] = 'W'; 1040 buffer->start_tag[3] = '>'; 1041 buffer->driver_version_tag[0] = 'D'; 1042 buffer->driver_version_tag[1] = 'V'; 1043 put_unaligned_le16(sizeof(buffer->driver_version), 1044 &buffer->driver_version_length); 1045 strscpy(buffer->driver_version, "Linux " DRIVER_VERSION, 1046 sizeof(buffer->driver_version)); 1047 buffer->dont_write_tag[0] = 'D'; 1048 buffer->dont_write_tag[1] = 'W'; 1049 buffer->end_tag[0] = 'Z'; 1050 buffer->end_tag[1] = 'Z'; 1051 1052 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1053 1054 kfree(buffer); 1055 1056 return rc; 1057 } 1058 1059 #pragma pack(1) 1060 1061 struct bmic_host_wellness_time { 1062 u8 start_tag[4]; 1063 u8 time_tag[2]; 1064 __le16 time_length; 1065 u8 time[8]; 1066 u8 dont_write_tag[2]; 1067 u8 end_tag[2]; 1068 }; 1069 1070 #pragma pack() 1071 1072 static int pqi_write_current_time_to_host_wellness( 1073 struct pqi_ctrl_info *ctrl_info) 1074 { 1075 int rc; 1076 struct bmic_host_wellness_time *buffer; 1077 size_t buffer_length; 1078 time64_t local_time; 1079 unsigned int year; 1080 struct tm tm; 1081 1082 buffer_length = sizeof(*buffer); 1083 1084 buffer = kmalloc(buffer_length, GFP_KERNEL); 1085 if (!buffer) 1086 return -ENOMEM; 1087 1088 buffer->start_tag[0] = '<'; 1089 buffer->start_tag[1] = 'H'; 1090 buffer->start_tag[2] = 'W'; 1091 buffer->start_tag[3] = '>'; 1092 buffer->time_tag[0] = 'T'; 1093 buffer->time_tag[1] = 'D'; 1094 put_unaligned_le16(sizeof(buffer->time), 1095 &buffer->time_length); 1096 1097 local_time = ktime_get_real_seconds(); 1098 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 1099 year = tm.tm_year + 1900; 1100 1101 buffer->time[0] = bin2bcd(tm.tm_hour); 1102 buffer->time[1] = bin2bcd(tm.tm_min); 1103 buffer->time[2] = bin2bcd(tm.tm_sec); 1104 buffer->time[3] = 0; 1105 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 1106 buffer->time[5] = bin2bcd(tm.tm_mday); 1107 buffer->time[6] = bin2bcd(year / 100); 1108 buffer->time[7] = 
bin2bcd(year % 100); 1109 1110 buffer->dont_write_tag[0] = 'D'; 1111 buffer->dont_write_tag[1] = 'W'; 1112 buffer->end_tag[0] = 'Z'; 1113 buffer->end_tag[1] = 'Z'; 1114 1115 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1116 1117 kfree(buffer); 1118 1119 return rc; 1120 } 1121 1122 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 1123 1124 static void pqi_update_time_worker(struct work_struct *work) 1125 { 1126 int rc; 1127 struct pqi_ctrl_info *ctrl_info; 1128 1129 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1130 update_time_work); 1131 1132 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 1133 if (rc) 1134 dev_warn(&ctrl_info->pci_dev->dev, 1135 "error updating time on controller\n"); 1136 1137 schedule_delayed_work(&ctrl_info->update_time_work, 1138 PQI_UPDATE_TIME_WORK_INTERVAL); 1139 } 1140 1141 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1142 { 1143 schedule_delayed_work(&ctrl_info->update_time_work, 0); 1144 } 1145 1146 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1147 { 1148 cancel_delayed_work_sync(&ctrl_info->update_time_work); 1149 } 1150 1151 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, 1152 size_t buffer_length) 1153 { 1154 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); 1155 } 1156 1157 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) 1158 { 1159 int rc; 1160 size_t lun_list_length; 1161 size_t lun_data_length; 1162 size_t new_lun_list_length; 1163 void *lun_data = NULL; 1164 struct report_lun_header *report_lun_header; 1165 1166 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 1167 if (!report_lun_header) { 1168 rc = -ENOMEM; 1169 goto out; 1170 } 1171 1172 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); 1173 if (rc) 1174 goto out; 1175 1176 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 1177 1178 again: 1179 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 1180 1181 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 1182 if (!lun_data) { 1183 rc = -ENOMEM; 1184 goto out; 1185 } 1186 1187 if (lun_list_length == 0) { 1188 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 1189 goto out; 1190 } 1191 1192 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 1193 if (rc) 1194 goto out; 1195 1196 new_lun_list_length = 1197 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); 1198 1199 if (new_lun_list_length > lun_list_length) { 1200 lun_list_length = new_lun_list_length; 1201 kfree(lun_data); 1202 goto again; 1203 } 1204 1205 out: 1206 kfree(report_lun_header); 1207 1208 if (rc) { 1209 kfree(lun_data); 1210 lun_data = NULL; 1211 } 1212 1213 *buffer = lun_data; 1214 1215 return rc; 1216 } 1217 1218 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1219 { 1220 int rc; 1221 unsigned int i; 1222 u8 rpl_response_format; 1223 u32 num_physicals; 1224 void *rpl_list; 1225 struct report_lun_header *rpl_header; 1226 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; 1227 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list; 1228 1229 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); 1230 if (rc) 1231 return rc; 1232 1233 if (ctrl_info->rpl_extended_format_4_5_supported) { 1234 rpl_header = rpl_list; 1235 
rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; 1236 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) { 1237 *buffer = rpl_list; 1238 return 0; 1239 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) { 1240 dev_err(&ctrl_info->pci_dev->dev, 1241 "RPL returned unsupported data format %u\n", 1242 rpl_response_format); 1243 return -EINVAL; 1244 } else { 1245 dev_warn(&ctrl_info->pci_dev->dev, 1246 "RPL returned extended format 2 instead of 4\n"); 1247 } 1248 } 1249 1250 rpl_8byte_wwid_list = rpl_list; 1251 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); 1252 1253 rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, 1254 num_physicals), GFP_KERNEL); 1255 if (!rpl_16byte_wwid_list) 1256 return -ENOMEM; 1257 1258 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid), 1259 &rpl_16byte_wwid_list->header.list_length); 1260 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; 1261 1262 for (i = 0; i < num_physicals; i++) { 1263 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); 1264 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); 1265 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); 1266 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; 1267 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; 1268 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; 1269 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; 1270 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; 1271 } 1272 1273 kfree(rpl_8byte_wwid_list); 1274 *buffer = rpl_16byte_wwid_list; 1275 1276 return 0; 1277 } 1278 1279 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1280 { 1281 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 1282 } 1283 1284 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 1285 struct report_phys_lun_16byte_wwid_list **physdev_list, 1286 struct report_log_lun_list **logdev_list) 1287 { 1288 int rc; 1289 size_t logdev_list_length; 1290 size_t logdev_data_length; 1291 struct report_log_lun_list *internal_logdev_list; 1292 struct report_log_lun_list *logdev_data; 1293 struct report_lun_header report_lun_header; 1294 1295 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 1296 if (rc) 1297 dev_err(&ctrl_info->pci_dev->dev, 1298 "report physical LUNs failed\n"); 1299 1300 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 1301 if (rc) 1302 dev_err(&ctrl_info->pci_dev->dev, 1303 "report logical LUNs failed\n"); 1304 1305 /* 1306 * Tack the controller itself onto the end of the logical device list 1307 * by adding a list entry that is all zeros. 
1308 */ 1309 1310 logdev_data = *logdev_list; 1311 1312 if (logdev_data) { 1313 logdev_list_length = 1314 get_unaligned_be32(&logdev_data->header.list_length); 1315 } else { 1316 memset(&report_lun_header, 0, sizeof(report_lun_header)); 1317 logdev_data = 1318 (struct report_log_lun_list *)&report_lun_header; 1319 logdev_list_length = 0; 1320 } 1321 1322 logdev_data_length = sizeof(struct report_lun_header) + 1323 logdev_list_length; 1324 1325 internal_logdev_list = kmalloc(logdev_data_length + 1326 sizeof(struct report_log_lun), GFP_KERNEL); 1327 if (!internal_logdev_list) { 1328 kfree(*logdev_list); 1329 *logdev_list = NULL; 1330 return -ENOMEM; 1331 } 1332 1333 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 1334 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 1335 sizeof(struct report_log_lun)); 1336 put_unaligned_be32(logdev_list_length + 1337 sizeof(struct report_log_lun), 1338 &internal_logdev_list->header.list_length); 1339 1340 kfree(*logdev_list); 1341 *logdev_list = internal_logdev_list; 1342 1343 return 0; 1344 } 1345 1346 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 1347 int bus, int target, int lun) 1348 { 1349 device->bus = bus; 1350 device->target = target; 1351 device->lun = lun; 1352 } 1353 1354 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 1355 { 1356 u8 *scsi3addr; 1357 u32 lunid; 1358 int bus; 1359 int target; 1360 int lun; 1361 1362 scsi3addr = device->scsi3addr; 1363 lunid = get_unaligned_le32(scsi3addr); 1364 1365 if (pqi_is_hba_lunid(scsi3addr)) { 1366 /* The specified device is the controller. */ 1367 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 1368 device->target_lun_valid = true; 1369 return; 1370 } 1371 1372 if (pqi_is_logical_device(device)) { 1373 if (device->is_external_raid_device) { 1374 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 1375 target = (lunid >> 16) & 0x3fff; 1376 lun = lunid & 0xff; 1377 } else { 1378 bus = PQI_RAID_VOLUME_BUS; 1379 target = 0; 1380 lun = lunid & 0x3fff; 1381 } 1382 pqi_set_bus_target_lun(device, bus, target, lun); 1383 device->target_lun_valid = true; 1384 return; 1385 } 1386 1387 /* 1388 * Defer target and LUN assignment for non-controller physical devices 1389 * because the SAS transport layer will make these assignments later. 
1390 */ 1391 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 1392 } 1393 1394 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 1395 struct pqi_scsi_dev *device) 1396 { 1397 int rc; 1398 u8 raid_level; 1399 u8 *buffer; 1400 1401 raid_level = SA_RAID_UNKNOWN; 1402 1403 buffer = kmalloc(64, GFP_KERNEL); 1404 if (buffer) { 1405 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1406 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 1407 if (rc == 0) { 1408 raid_level = buffer[8]; 1409 if (raid_level > SA_RAID_MAX) 1410 raid_level = SA_RAID_UNKNOWN; 1411 } 1412 kfree(buffer); 1413 } 1414 1415 device->raid_level = raid_level; 1416 } 1417 1418 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1419 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1420 { 1421 char *err_msg; 1422 u32 raid_map_size; 1423 u32 r5or6_blocks_per_row; 1424 1425 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1426 1427 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1428 err_msg = "RAID map too small"; 1429 goto bad_raid_map; 1430 } 1431 1432 if (device->raid_level == SA_RAID_1) { 1433 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1434 err_msg = "invalid RAID-1 map"; 1435 goto bad_raid_map; 1436 } 1437 } else if (device->raid_level == SA_RAID_TRIPLE) { 1438 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1439 err_msg = "invalid RAID-1(Triple) map"; 1440 goto bad_raid_map; 1441 } 1442 } else if ((device->raid_level == SA_RAID_5 || 1443 device->raid_level == SA_RAID_6) && 1444 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1445 /* RAID 50/60 */ 1446 r5or6_blocks_per_row = 1447 get_unaligned_le16(&raid_map->strip_size) * 1448 get_unaligned_le16(&raid_map->data_disks_per_row); 1449 if (r5or6_blocks_per_row == 0) { 1450 err_msg = "invalid RAID-5 or RAID-6 map"; 1451 goto bad_raid_map; 1452 } 1453 } 1454 1455 return 0; 1456 1457 bad_raid_map: 1458 dev_warn(&ctrl_info->pci_dev->dev, 1459 "logical device %08x%08x %s\n", 1460 *((u32 *)&device->scsi3addr), 1461 *((u32 *)&device->scsi3addr[4]), err_msg); 1462 1463 return -EINVAL; 1464 } 1465 1466 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1467 struct pqi_scsi_dev *device) 1468 { 1469 int rc; 1470 u32 raid_map_size; 1471 struct raid_map *raid_map; 1472 1473 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1474 if (!raid_map) 1475 return -ENOMEM; 1476 1477 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1478 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); 1479 if (rc) 1480 goto error; 1481 1482 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1483 1484 if (raid_map_size > sizeof(*raid_map)) { 1485 1486 kfree(raid_map); 1487 1488 raid_map = kmalloc(raid_map_size, GFP_KERNEL); 1489 if (!raid_map) 1490 return -ENOMEM; 1491 1492 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1493 device->scsi3addr, raid_map, raid_map_size, 0, NULL); 1494 if (rc) 1495 goto error; 1496 1497 if (get_unaligned_le32(&raid_map->structure_size) 1498 != raid_map_size) { 1499 dev_warn(&ctrl_info->pci_dev->dev, 1500 "requested %u bytes, received %u bytes\n", 1501 raid_map_size, 1502 get_unaligned_le32(&raid_map->structure_size)); 1503 rc = -EINVAL; 1504 goto error; 1505 } 1506 } 1507 1508 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 1509 if (rc) 1510 goto error; 1511 1512 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); 1513 if (!device->raid_io_stats) { 1514 rc = -ENOMEM; 1515 goto error; 1516 } 1517 
1518 device->raid_map = raid_map; 1519 1520 return 0; 1521 1522 error: 1523 kfree(raid_map); 1524 1525 return rc; 1526 } 1527 1528 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, 1529 struct pqi_scsi_dev *device) 1530 { 1531 if (!ctrl_info->lv_drive_type_mix_valid) { 1532 device->max_transfer_encrypted = ~0; 1533 return; 1534 } 1535 1536 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { 1537 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: 1538 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: 1539 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: 1540 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: 1541 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: 1542 case LV_DRIVE_TYPE_MIX_SAS_ONLY: 1543 case LV_DRIVE_TYPE_MIX_SATA_ONLY: 1544 device->max_transfer_encrypted = 1545 ctrl_info->max_transfer_encrypted_sas_sata; 1546 break; 1547 case LV_DRIVE_TYPE_MIX_NVME_ONLY: 1548 device->max_transfer_encrypted = 1549 ctrl_info->max_transfer_encrypted_nvme; 1550 break; 1551 case LV_DRIVE_TYPE_MIX_UNKNOWN: 1552 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: 1553 default: 1554 device->max_transfer_encrypted = 1555 min(ctrl_info->max_transfer_encrypted_sas_sata, 1556 ctrl_info->max_transfer_encrypted_nvme); 1557 break; 1558 } 1559 } 1560 1561 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1562 struct pqi_scsi_dev *device) 1563 { 1564 int rc; 1565 u8 *buffer; 1566 u8 bypass_status; 1567 1568 buffer = kmalloc(64, GFP_KERNEL); 1569 if (!buffer) 1570 return; 1571 1572 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1573 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1574 if (rc) 1575 goto out; 1576 1577 #define RAID_BYPASS_STATUS 4 1578 #define RAID_BYPASS_CONFIGURED 0x1 1579 #define RAID_BYPASS_ENABLED 0x2 1580 1581 bypass_status = buffer[RAID_BYPASS_STATUS]; 1582 device->raid_bypass_configured = 1583 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1584 if (device->raid_bypass_configured && 1585 (bypass_status & RAID_BYPASS_ENABLED) && 1586 pqi_get_raid_map(ctrl_info, device) == 0) { 1587 device->raid_bypass_enabled = true; 1588 if (get_unaligned_le16(&device->raid_map->flags) & 1589 RAID_MAP_ENCRYPTION_ENABLED) 1590 pqi_set_max_transfer_encrypted(ctrl_info, device); 1591 } 1592 1593 out: 1594 kfree(buffer); 1595 } 1596 1597 /* 1598 * Use vendor-specific VPD to determine online/offline status of a volume. 
1599 */ 1600 1601 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1602 struct pqi_scsi_dev *device) 1603 { 1604 int rc; 1605 size_t page_length; 1606 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1607 bool volume_offline = true; 1608 u32 volume_flags; 1609 struct ciss_vpd_logical_volume_status *vpd; 1610 1611 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1612 if (!vpd) 1613 goto no_buffer; 1614 1615 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1616 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1617 if (rc) 1618 goto out; 1619 1620 if (vpd->page_code != CISS_VPD_LV_STATUS) 1621 goto out; 1622 1623 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1624 volume_status) + vpd->page_length; 1625 if (page_length < sizeof(*vpd)) 1626 goto out; 1627 1628 volume_status = vpd->volume_status; 1629 volume_flags = get_unaligned_be32(&vpd->flags); 1630 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1631 1632 out: 1633 kfree(vpd); 1634 no_buffer: 1635 device->volume_status = volume_status; 1636 device->volume_offline = volume_offline; 1637 } 1638 1639 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 1640 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 1641 #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 1642 1643 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, 1644 struct pqi_scsi_dev *device, 1645 struct bmic_identify_physical_device *id_phys) 1646 { 1647 int rc; 1648 1649 memset(id_phys, 0, sizeof(*id_phys)); 1650 1651 rc = pqi_identify_physical_device(ctrl_info, device, 1652 id_phys, sizeof(*id_phys)); 1653 if (rc) { 1654 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1655 return rc; 1656 } 1657 1658 scsi_sanitize_inquiry_string(&id_phys->model[0], 8); 1659 scsi_sanitize_inquiry_string(&id_phys->model[8], 16); 1660 1661 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); 1662 memcpy(device->model, &id_phys->model[8], sizeof(device->model)); 1663 1664 device->box_index = id_phys->box_index; 1665 device->phys_box_on_bus = id_phys->phys_box_on_bus; 1666 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; 1667 device->queue_depth = 1668 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1669 device->active_path_index = id_phys->active_path_number; 1670 device->path_map = id_phys->redundant_path_present_map; 1671 memcpy(&device->box, 1672 &id_phys->alternate_paths_phys_box_on_port, 1673 sizeof(device->box)); 1674 memcpy(&device->phys_connector, 1675 &id_phys->alternate_paths_phys_connector, 1676 sizeof(device->phys_connector)); 1677 device->bay = id_phys->phys_bay_in_box; 1678 device->lun_count = id_phys->multi_lun_device_lun_count; 1679 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && 1680 id_phys->phy_count) 1681 device->phy_id = 1682 id_phys->phy_to_phy_map[device->active_path_index]; 1683 else 1684 device->phy_id = 0xFF; 1685 1686 device->ncq_prio_support = 1687 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & 1688 PQI_DEVICE_NCQ_PRIO_SUPPORTED); 1689 1690 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); 1691 1692 return 0; 1693 } 1694 1695 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, 1696 struct pqi_scsi_dev *device) 1697 { 1698 int rc; 1699 u8 *buffer; 1700 1701 buffer = kmalloc(64, GFP_KERNEL); 1702 if (!buffer) 1703 return -ENOMEM; 1704 1705 /* Send an inquiry to the device to see what it is. 
*/ 1706 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); 1707 if (rc) 1708 goto out; 1709 1710 scsi_sanitize_inquiry_string(&buffer[8], 8); 1711 scsi_sanitize_inquiry_string(&buffer[16], 16); 1712 1713 device->devtype = buffer[0] & 0x1f; 1714 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1715 memcpy(device->model, &buffer[16], sizeof(device->model)); 1716 1717 if (device->devtype == TYPE_DISK) { 1718 if (device->is_external_raid_device) { 1719 device->raid_level = SA_RAID_UNKNOWN; 1720 device->volume_status = CISS_LV_OK; 1721 device->volume_offline = false; 1722 } else { 1723 pqi_get_raid_level(ctrl_info, device); 1724 pqi_get_raid_bypass_status(ctrl_info, device); 1725 pqi_get_volume_status(ctrl_info, device); 1726 } 1727 } 1728 1729 out: 1730 kfree(buffer); 1731 1732 return rc; 1733 } 1734 1735 /* 1736 * Prevent adding drive to OS for some corner cases such as a drive 1737 * undergoing a sanitize (erase) operation. Some OSes will continue to poll 1738 * the drive until the sanitize completes, which can take hours, 1739 * resulting in long bootup delays. Commands such as TUR, READ_CAP 1740 * are allowed, but READ/WRITE cause check condition. So the OS 1741 * cannot check/read the partition table. 1742 * Note: devices that have completed sanitize must be re-enabled 1743 * using the management utility. 1744 */ 1745 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) 1746 { 1747 return device->erase_in_progress; 1748 } 1749 1750 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, 1751 struct pqi_scsi_dev *device, 1752 struct bmic_identify_physical_device *id_phys) 1753 { 1754 int rc; 1755 1756 if (device->is_expander_smp_device) 1757 return 0; 1758 1759 if (pqi_is_logical_device(device)) 1760 rc = pqi_get_logical_device_info(ctrl_info, device); 1761 else 1762 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); 1763 1764 return rc; 1765 } 1766 1767 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1768 struct pqi_scsi_dev *device, 1769 struct bmic_identify_physical_device *id_phys) 1770 { 1771 int rc; 1772 1773 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); 1774 1775 if (rc == 0 && device->lun_count == 0) 1776 device->lun_count = 1; 1777 1778 return rc; 1779 } 1780 1781 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1782 struct pqi_scsi_dev *device) 1783 { 1784 char *status; 1785 static const char unknown_state_str[] = 1786 "Volume is in an unknown state (%u)"; 1787 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1788 1789 switch (device->volume_status) { 1790 case CISS_LV_OK: 1791 status = "Volume online"; 1792 break; 1793 case CISS_LV_FAILED: 1794 status = "Volume failed"; 1795 break; 1796 case CISS_LV_NOT_CONFIGURED: 1797 status = "Volume not configured"; 1798 break; 1799 case CISS_LV_DEGRADED: 1800 status = "Volume degraded"; 1801 break; 1802 case CISS_LV_READY_FOR_RECOVERY: 1803 status = "Volume ready for recovery operation"; 1804 break; 1805 case CISS_LV_UNDERGOING_RECOVERY: 1806 status = "Volume undergoing recovery"; 1807 break; 1808 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1809 status = "Wrong physical drive was replaced"; 1810 break; 1811 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1812 status = "A physical drive not properly connected"; 1813 break; 1814 case CISS_LV_HARDWARE_OVERHEATING: 1815 status = "Hardware is overheating"; 1816 break; 1817 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1818 status = "Hardware has overheated"; 
1819 break; 1820 case CISS_LV_UNDERGOING_EXPANSION: 1821 status = "Volume undergoing expansion"; 1822 break; 1823 case CISS_LV_NOT_AVAILABLE: 1824 status = "Volume waiting for transforming volume"; 1825 break; 1826 case CISS_LV_QUEUED_FOR_EXPANSION: 1827 status = "Volume queued for expansion"; 1828 break; 1829 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1830 status = "Volume disabled due to SCSI ID conflict"; 1831 break; 1832 case CISS_LV_EJECTED: 1833 status = "Volume has been ejected"; 1834 break; 1835 case CISS_LV_UNDERGOING_ERASE: 1836 status = "Volume undergoing background erase"; 1837 break; 1838 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1839 status = "Volume ready for predictive spare rebuild"; 1840 break; 1841 case CISS_LV_UNDERGOING_RPI: 1842 status = "Volume undergoing rapid parity initialization"; 1843 break; 1844 case CISS_LV_PENDING_RPI: 1845 status = "Volume queued for rapid parity initialization"; 1846 break; 1847 case CISS_LV_ENCRYPTED_NO_KEY: 1848 status = "Encrypted volume inaccessible - key not present"; 1849 break; 1850 case CISS_LV_UNDERGOING_ENCRYPTION: 1851 status = "Volume undergoing encryption process"; 1852 break; 1853 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1854 status = "Volume undergoing encryption re-keying process"; 1855 break; 1856 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1857 status = "Volume encrypted but encryption is disabled"; 1858 break; 1859 case CISS_LV_PENDING_ENCRYPTION: 1860 status = "Volume pending migration to encrypted state"; 1861 break; 1862 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1863 status = "Volume pending encryption rekeying"; 1864 break; 1865 case CISS_LV_NOT_SUPPORTED: 1866 status = "Volume not supported on this controller"; 1867 break; 1868 case CISS_LV_STATUS_UNAVAILABLE: 1869 status = "Volume status not available"; 1870 break; 1871 default: 1872 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1873 unknown_state_str, device->volume_status); 1874 status = unknown_state_buffer; 1875 break; 1876 } 1877 1878 dev_info(&ctrl_info->pci_dev->dev, 1879 "scsi %d:%d:%d:%d %s\n", 1880 ctrl_info->scsi_host->host_no, 1881 device->bus, device->target, device->lun, status); 1882 } 1883 1884 static void pqi_rescan_worker(struct work_struct *work) 1885 { 1886 struct pqi_ctrl_info *ctrl_info; 1887 1888 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1889 rescan_work); 1890 1891 pqi_scan_scsi_devices(ctrl_info); 1892 } 1893 1894 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1895 struct pqi_scsi_dev *device) 1896 { 1897 int rc; 1898 1899 if (pqi_is_logical_device(device)) 1900 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1901 device->target, device->lun); 1902 else 1903 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1904 1905 return rc; 1906 } 1907 1908 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) 1909 1910 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) 1911 { 1912 int rc; 1913 int lun; 1914 1915 for (lun = 0; lun < device->lun_count; lun++) { 1916 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, 1917 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); 1918 if (rc) 1919 dev_err(&ctrl_info->pci_dev->dev, 1920 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", 1921 ctrl_info->scsi_host->host_no, device->bus, 1922 device->target, lun, 1923 atomic_read(&device->scsi_cmds_outstanding[lun])); 1924 } 1925 1926 if (pqi_is_logical_device(device)) 1927 scsi_remove_device(device->sdev); 
1928 else 1929 pqi_remove_sas_device(device); 1930 1931 pqi_device_remove_start(device); 1932 } 1933 1934 /* Assumes the SCSI device list lock is held. */ 1935 1936 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1937 int bus, int target, int lun) 1938 { 1939 struct pqi_scsi_dev *device; 1940 1941 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 1942 if (device->bus == bus && device->target == target && device->lun == lun) 1943 return device; 1944 1945 return NULL; 1946 } 1947 1948 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) 1949 { 1950 if (dev1->is_physical_device != dev2->is_physical_device) 1951 return false; 1952 1953 if (dev1->is_physical_device) 1954 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; 1955 1956 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; 1957 } 1958 1959 enum pqi_find_result { 1960 DEVICE_NOT_FOUND, 1961 DEVICE_CHANGED, 1962 DEVICE_SAME, 1963 }; 1964 1965 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1966 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) 1967 { 1968 struct pqi_scsi_dev *device; 1969 1970 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 1971 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { 1972 *matching_device = device; 1973 if (pqi_device_equal(device_to_find, device)) { 1974 if (device_to_find->volume_offline) 1975 return DEVICE_CHANGED; 1976 return DEVICE_SAME; 1977 } 1978 return DEVICE_CHANGED; 1979 } 1980 } 1981 1982 return DEVICE_NOT_FOUND; 1983 } 1984 1985 static inline const char *pqi_device_type(struct pqi_scsi_dev *device) 1986 { 1987 if (device->is_expander_smp_device) 1988 return "Enclosure SMP "; 1989 1990 return scsi_device_type(device->devtype); 1991 } 1992 1993 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1994 1995 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1996 char *action, struct pqi_scsi_dev *device) 1997 { 1998 ssize_t count; 1999 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 2000 2001 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 2002 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 2003 2004 if (device->target_lun_valid) 2005 count += scnprintf(buffer + count, 2006 PQI_DEV_INFO_BUFFER_LENGTH - count, 2007 "%d:%d", 2008 device->target, 2009 device->lun); 2010 else 2011 count += scnprintf(buffer + count, 2012 PQI_DEV_INFO_BUFFER_LENGTH - count, 2013 "-:-"); 2014 2015 if (pqi_is_logical_device(device)) { 2016 count += scnprintf(buffer + count, 2017 PQI_DEV_INFO_BUFFER_LENGTH - count, 2018 " %08x%08x", 2019 *((u32 *)&device->scsi3addr), 2020 *((u32 *)&device->scsi3addr[4])); 2021 } else if (ctrl_info->rpl_extended_format_4_5_supported) { 2022 if (device->device_type == SA_DEVICE_TYPE_NVME) 2023 count += scnprintf(buffer + count, 2024 PQI_DEV_INFO_BUFFER_LENGTH - count, 2025 " %016llx%016llx", 2026 get_unaligned_be64(&device->wwid[0]), 2027 get_unaligned_be64(&device->wwid[8])); 2028 else 2029 count += scnprintf(buffer + count, 2030 PQI_DEV_INFO_BUFFER_LENGTH - count, 2031 " %016llx", 2032 get_unaligned_be64(&device->wwid[0])); 2033 } else { 2034 count += scnprintf(buffer + count, 2035 PQI_DEV_INFO_BUFFER_LENGTH - count, 2036 " %016llx", 2037 get_unaligned_be64(&device->wwid[0])); 2038 } 2039 2040 2041 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 2042 " %s %.8s %.16s ", 2043 pqi_device_type(device), 2044 
device->vendor, 2045 device->model); 2046 2047 if (pqi_is_logical_device(device)) { 2048 if (device->devtype == TYPE_DISK) 2049 count += scnprintf(buffer + count, 2050 PQI_DEV_INFO_BUFFER_LENGTH - count, 2051 "SSDSmartPathCap%c En%c %-12s", 2052 device->raid_bypass_configured ? '+' : '-', 2053 device->raid_bypass_enabled ? '+' : '-', 2054 pqi_raid_level_to_string(device->raid_level)); 2055 } else { 2056 count += scnprintf(buffer + count, 2057 PQI_DEV_INFO_BUFFER_LENGTH - count, 2058 "AIO%c", device->aio_enabled ? '+' : '-'); 2059 if (device->devtype == TYPE_DISK || 2060 device->devtype == TYPE_ZBC) 2061 count += scnprintf(buffer + count, 2062 PQI_DEV_INFO_BUFFER_LENGTH - count, 2063 " qd=%-6d", device->queue_depth); 2064 } 2065 2066 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 2067 } 2068 2069 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2) 2070 { 2071 u32 raid_map1_size; 2072 u32 raid_map2_size; 2073 2074 if (raid_map1 == NULL || raid_map2 == NULL) 2075 return raid_map1 == raid_map2; 2076 2077 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); 2078 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); 2079 2080 if (raid_map1_size != raid_map2_size) 2081 return false; 2082 2083 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0; 2084 } 2085 2086 /* Assumes the SCSI device list lock is held. */ 2087 2088 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, 2089 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) 2090 { 2091 existing_device->device_type = new_device->device_type; 2092 existing_device->bus = new_device->bus; 2093 if (new_device->target_lun_valid) { 2094 existing_device->target = new_device->target; 2095 existing_device->lun = new_device->lun; 2096 existing_device->target_lun_valid = true; 2097 } 2098 2099 /* By definition, the scsi3addr and wwid fields are already the same. */ 2100 2101 existing_device->is_physical_device = new_device->is_physical_device; 2102 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); 2103 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); 2104 existing_device->sas_address = new_device->sas_address; 2105 existing_device->queue_depth = new_device->queue_depth; 2106 existing_device->device_offline = false; 2107 existing_device->lun_count = new_device->lun_count; 2108 2109 if (pqi_is_logical_device(existing_device)) { 2110 existing_device->is_external_raid_device = new_device->is_external_raid_device; 2111 2112 if (existing_device->devtype == TYPE_DISK) { 2113 existing_device->raid_level = new_device->raid_level; 2114 existing_device->volume_status = new_device->volume_status; 2115 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); 2116 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { 2117 kfree(existing_device->raid_map); 2118 existing_device->raid_map = new_device->raid_map; 2119 /* To prevent this from being freed later. 
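 * The raid_map allocation now belongs to existing_device; clearing
 * new_device->raid_map below keeps pqi_free_device() from freeing it
 * when the temporary new_device entry is discarded.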
*/ 2120 new_device->raid_map = NULL; 2121 } 2122 if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { 2123 existing_device->raid_io_stats = new_device->raid_io_stats; 2124 new_device->raid_io_stats = NULL; 2125 } 2126 existing_device->raid_bypass_configured = new_device->raid_bypass_configured; 2127 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; 2128 } 2129 } else { 2130 existing_device->aio_enabled = new_device->aio_enabled; 2131 existing_device->aio_handle = new_device->aio_handle; 2132 existing_device->is_expander_smp_device = new_device->is_expander_smp_device; 2133 existing_device->active_path_index = new_device->active_path_index; 2134 existing_device->phy_id = new_device->phy_id; 2135 existing_device->path_map = new_device->path_map; 2136 existing_device->bay = new_device->bay; 2137 existing_device->box_index = new_device->box_index; 2138 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; 2139 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; 2140 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); 2141 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); 2142 } 2143 } 2144 2145 static inline void pqi_free_device(struct pqi_scsi_dev *device) 2146 { 2147 if (device) { 2148 free_percpu(device->raid_io_stats); 2149 kfree(device->raid_map); 2150 kfree(device); 2151 } 2152 } 2153 2154 /* 2155 * Called when exposing a new device to the OS fails in order to re-adjust 2156 * our internal SCSI device list to match the SCSI ML's view. 2157 */ 2158 2159 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 2160 struct pqi_scsi_dev *device) 2161 { 2162 unsigned long flags; 2163 2164 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2165 list_del(&device->scsi_device_list_entry); 2166 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2167 2168 /* Allow the device structure to be freed later. */ 2169 device->keep_device = false; 2170 } 2171 2172 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) 2173 { 2174 if (device->is_expander_smp_device) 2175 return device->sas_port != NULL; 2176 2177 return device->sdev != NULL; 2178 } 2179 2180 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) 2181 { 2182 unsigned int lun; 2183 struct pqi_tmf_work *tmf_work; 2184 2185 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) 2186 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); 2187 } 2188 2189 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device) 2190 { 2191 if (pqi_device_in_remove(device)) 2192 return false; 2193 2194 if (device->sdev == NULL) 2195 return false; 2196 2197 if (!scsi_device_online(device->sdev)) 2198 return false; 2199 2200 return device->rescan; 2201 } 2202 2203 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 2204 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 2205 { 2206 int rc; 2207 unsigned int i; 2208 unsigned long flags; 2209 enum pqi_find_result find_result; 2210 struct pqi_scsi_dev *device; 2211 struct pqi_scsi_dev *next; 2212 struct pqi_scsi_dev *matching_device; 2213 LIST_HEAD(add_list); 2214 LIST_HEAD(delete_list); 2215 2216 /* 2217 * The idea here is to do as little work as possible while holding the 2218 * spinlock. 
That's why we go to great pains to defer anything other 2219 * than updating the internal device list until after we release the 2220 * spinlock. 2221 */ 2222 2223 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2224 2225 /* Assume that all devices in the existing list have gone away. */ 2226 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 2227 device->device_gone = true; 2228 2229 for (i = 0; i < num_new_devices; i++) { 2230 device = new_device_list[i]; 2231 2232 find_result = pqi_scsi_find_entry(ctrl_info, device, 2233 &matching_device); 2234 2235 switch (find_result) { 2236 case DEVICE_SAME: 2237 /* 2238 * The newly found device is already in the existing 2239 * device list. 2240 */ 2241 device->new_device = false; 2242 matching_device->device_gone = false; 2243 pqi_scsi_update_device(ctrl_info, matching_device, device); 2244 break; 2245 case DEVICE_NOT_FOUND: 2246 /* 2247 * The newly found device is NOT in the existing device 2248 * list. 2249 */ 2250 device->new_device = true; 2251 break; 2252 case DEVICE_CHANGED: 2253 /* 2254 * The original device has gone away and we need to add 2255 * the new device. 2256 */ 2257 device->new_device = true; 2258 break; 2259 } 2260 } 2261 2262 /* Process all devices that have gone away. */ 2263 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 2264 scsi_device_list_entry) { 2265 if (device->device_gone) { 2266 list_del(&device->scsi_device_list_entry); 2267 list_add_tail(&device->delete_list_entry, &delete_list); 2268 } 2269 } 2270 2271 /* Process all new devices. */ 2272 for (i = 0; i < num_new_devices; i++) { 2273 device = new_device_list[i]; 2274 if (!device->new_device) 2275 continue; 2276 if (device->volume_offline) 2277 continue; 2278 list_add_tail(&device->scsi_device_list_entry, 2279 &ctrl_info->scsi_device_list); 2280 list_add_tail(&device->add_list_entry, &add_list); 2281 /* To prevent this device structure from being freed later. */ 2282 device->keep_device = true; 2283 pqi_init_device_tmf_work(device); 2284 } 2285 2286 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2287 2288 /* 2289 * If OFA is in progress and there are devices that need to be deleted, 2290 * allow any pending reset operations to continue and unblock any SCSI 2291 * requests before removal. 2292 */ 2293 if (pqi_ofa_in_progress(ctrl_info)) { 2294 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) 2295 if (pqi_is_device_added(device)) 2296 pqi_device_remove_start(device); 2297 pqi_ctrl_unblock_device_reset(ctrl_info); 2298 pqi_scsi_unblock_requests(ctrl_info); 2299 } 2300 2301 /* Remove all devices that have gone away. */ 2302 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { 2303 if (device->volume_offline) { 2304 pqi_dev_info(ctrl_info, "offline", device); 2305 pqi_show_volume_status(ctrl_info, device); 2306 } else { 2307 pqi_dev_info(ctrl_info, "removed", device); 2308 } 2309 if (pqi_is_device_added(device)) 2310 pqi_remove_device(ctrl_info, device); 2311 list_del(&device->delete_list_entry); 2312 pqi_free_device(device); 2313 } 2314 2315 /* 2316 * Notify the SML of any existing device changes such as; 2317 * queue depth, device size. 2318 */ 2319 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 2320 /* 2321 * Check for queue depth change. 
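 * advertised_queue_depth caches the depth last reported to the SCSI
 * midlayer, so scsi_change_queue_depth() is only called when the
 * controller-reported depth has actually changed.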
2322 */ 2323 if (device->sdev && device->queue_depth != device->advertised_queue_depth) { 2324 device->advertised_queue_depth = device->queue_depth; 2325 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); 2326 } 2327 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2328 /* 2329 * Check for changes in the device, such as size. 2330 */ 2331 if (pqi_volume_rescan_needed(device)) { 2332 device->rescan = false; 2333 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2334 scsi_rescan_device(device->sdev); 2335 } else { 2336 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2337 } 2338 } 2339 2340 /* Expose any new devices. */ 2341 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 2342 if (!pqi_is_device_added(device)) { 2343 rc = pqi_add_device(ctrl_info, device); 2344 if (rc == 0) { 2345 pqi_dev_info(ctrl_info, "added", device); 2346 } else { 2347 dev_warn(&ctrl_info->pci_dev->dev, 2348 "scsi %d:%d:%d:%d addition failed, device not added\n", 2349 ctrl_info->scsi_host->host_no, 2350 device->bus, device->target, 2351 device->lun); 2352 pqi_fixup_botched_add(ctrl_info, device); 2353 } 2354 } 2355 } 2356 2357 } 2358 2359 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) 2360 { 2361 /* 2362 * Only support the HBA controller itself as a RAID 2363 * controller. If it's a RAID controller other than 2364 * the HBA itself (an external RAID controller, for 2365 * example), we don't support it. 2366 */ 2367 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && 2368 !pqi_is_hba_lunid(device->scsi3addr)) 2369 return false; 2370 2371 return true; 2372 } 2373 2374 static inline bool pqi_skip_device(u8 *scsi3addr) 2375 { 2376 /* Ignore all masked devices. */ 2377 if (MASKED_DEVICE(scsi3addr)) 2378 return true; 2379 2380 return false; 2381 } 2382 2383 static inline void pqi_mask_device(u8 *scsi3addr) 2384 { 2385 scsi3addr[3] |= 0xc0; 2386 } 2387 2388 static inline bool pqi_expose_device(struct pqi_scsi_dev *device) 2389 { 2390 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); 2391 } 2392 2393 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2394 { 2395 int i; 2396 int rc; 2397 LIST_HEAD(new_device_list_head); 2398 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; 2399 struct report_log_lun_list *logdev_list = NULL; 2400 struct report_phys_lun_16byte_wwid *phys_lun; 2401 struct report_log_lun *log_lun; 2402 struct bmic_identify_physical_device *id_phys = NULL; 2403 u32 num_physicals; 2404 u32 num_logicals; 2405 struct pqi_scsi_dev **new_device_list = NULL; 2406 struct pqi_scsi_dev *device; 2407 struct pqi_scsi_dev *next; 2408 unsigned int num_new_devices; 2409 unsigned int num_valid_devices; 2410 bool is_physical_device; 2411 u8 *scsi3addr; 2412 unsigned int physical_index; 2413 unsigned int logical_index; 2414 static char *out_of_memory_msg = 2415 "failed to allocate memory, device discovery stopped"; 2416 2417 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 2418 if (rc) 2419 goto out; 2420 2421 if (physdev_list) 2422 num_physicals = 2423 get_unaligned_be32(&physdev_list->header.list_length) 2424 / sizeof(physdev_list->lun_entries[0]); 2425 else 2426 num_physicals = 0; 2427 2428 if (logdev_list) 2429 num_logicals = 2430 get_unaligned_be32(&logdev_list->header.list_length) 2431 / sizeof(logdev_list->lun_entries[0]); 2432 else 2433 num_logicals = 0; 2434 2435 if (num_physicals) { 2436 /* 2437 * We need this buffer for calls to 
pqi_get_physical_disk_info() 2438 * below. We allocate it here instead of inside 2439 * pqi_get_physical_disk_info() because it's a fairly large 2440 * buffer. 2441 */ 2442 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2443 if (!id_phys) { 2444 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2445 out_of_memory_msg); 2446 rc = -ENOMEM; 2447 goto out; 2448 } 2449 2450 if (pqi_hide_vsep) { 2451 for (i = num_physicals - 1; i >= 0; i--) { 2452 phys_lun = &physdev_list->lun_entries[i]; 2453 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { 2454 pqi_mask_device(phys_lun->lunid); 2455 break; 2456 } 2457 } 2458 } 2459 } 2460 2461 if (num_logicals && 2462 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) 2463 ctrl_info->lv_drive_type_mix_valid = true; 2464 2465 num_new_devices = num_physicals + num_logicals; 2466 2467 new_device_list = kmalloc_array(num_new_devices, 2468 sizeof(*new_device_list), 2469 GFP_KERNEL); 2470 if (!new_device_list) { 2471 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2472 rc = -ENOMEM; 2473 goto out; 2474 } 2475 2476 for (i = 0; i < num_new_devices; i++) { 2477 device = kzalloc(sizeof(*device), GFP_KERNEL); 2478 if (!device) { 2479 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2480 out_of_memory_msg); 2481 rc = -ENOMEM; 2482 goto out; 2483 } 2484 list_add_tail(&device->new_device_list_entry, 2485 &new_device_list_head); 2486 } 2487 2488 device = NULL; 2489 num_valid_devices = 0; 2490 physical_index = 0; 2491 logical_index = 0; 2492 2493 for (i = 0; i < num_new_devices; i++) { 2494 2495 if ((!pqi_expose_ld_first && i < num_physicals) || 2496 (pqi_expose_ld_first && i >= num_logicals)) { 2497 is_physical_device = true; 2498 phys_lun = &physdev_list->lun_entries[physical_index++]; 2499 log_lun = NULL; 2500 scsi3addr = phys_lun->lunid; 2501 } else { 2502 is_physical_device = false; 2503 phys_lun = NULL; 2504 log_lun = &logdev_list->lun_entries[logical_index++]; 2505 scsi3addr = log_lun->lunid; 2506 } 2507 2508 if (is_physical_device && pqi_skip_device(scsi3addr)) 2509 continue; 2510 2511 if (device) 2512 device = list_next_entry(device, new_device_list_entry); 2513 else 2514 device = list_first_entry(&new_device_list_head, 2515 struct pqi_scsi_dev, new_device_list_entry); 2516 2517 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2518 device->is_physical_device = is_physical_device; 2519 if (is_physical_device) { 2520 device->device_type = phys_lun->device_type; 2521 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) 2522 device->is_expander_smp_device = true; 2523 } else { 2524 device->is_external_raid_device = 2525 pqi_is_external_raid_addr(scsi3addr); 2526 } 2527 2528 if (!pqi_is_supported_device(device)) 2529 continue; 2530 2531 /* Gather information about the device. */ 2532 rc = pqi_get_device_info(ctrl_info, device, id_phys); 2533 if (rc == -ENOMEM) { 2534 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2535 out_of_memory_msg); 2536 goto out; 2537 } 2538 if (rc) { 2539 if (device->is_physical_device) 2540 dev_warn(&ctrl_info->pci_dev->dev, 2541 "obtaining device info failed, skipping physical device %016llx%016llx\n", 2542 get_unaligned_be64(&phys_lun->wwid[0]), 2543 get_unaligned_be64(&phys_lun->wwid[8])); 2544 else 2545 dev_warn(&ctrl_info->pci_dev->dev, 2546 "obtaining device info failed, skipping logical device %08x%08x\n", 2547 *((u32 *)&device->scsi3addr), 2548 *((u32 *)&device->scsi3addr[4])); 2549 rc = 0; 2550 continue; 2551 } 2552 2553 /* Do not present disks that the OS cannot fully probe. 
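 * (for example, a drive with a sanitize operation still in progress -
 * see pqi_keep_device_offline() above).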
*/ 2554 if (pqi_keep_device_offline(device)) 2555 continue; 2556 2557 pqi_assign_bus_target_lun(device); 2558 2559 if (device->is_physical_device) { 2560 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); 2561 if ((phys_lun->device_flags & 2562 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2563 phys_lun->aio_handle) { 2564 device->aio_enabled = true; 2565 device->aio_handle = 2566 phys_lun->aio_handle; 2567 } 2568 } else { 2569 memcpy(device->volume_id, log_lun->volume_id, 2570 sizeof(device->volume_id)); 2571 } 2572 2573 device->sas_address = get_unaligned_be64(&device->wwid[0]); 2574 2575 new_device_list[num_valid_devices++] = device; 2576 } 2577 2578 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2579 2580 out: 2581 list_for_each_entry_safe(device, next, &new_device_list_head, 2582 new_device_list_entry) { 2583 if (device->keep_device) 2584 continue; 2585 list_del(&device->new_device_list_entry); 2586 pqi_free_device(device); 2587 } 2588 2589 kfree(new_device_list); 2590 kfree(physdev_list); 2591 kfree(logdev_list); 2592 kfree(id_phys); 2593 2594 return rc; 2595 } 2596 2597 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2598 { 2599 int rc; 2600 int mutex_acquired; 2601 2602 if (pqi_ctrl_offline(ctrl_info)) 2603 return -ENXIO; 2604 2605 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 2606 2607 if (!mutex_acquired) { 2608 if (pqi_ctrl_scan_blocked(ctrl_info)) 2609 return -EBUSY; 2610 pqi_schedule_rescan_worker_delayed(ctrl_info); 2611 return -EINPROGRESS; 2612 } 2613 2614 rc = pqi_update_scsi_devices(ctrl_info); 2615 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) 2616 pqi_schedule_rescan_worker_delayed(ctrl_info); 2617 2618 mutex_unlock(&ctrl_info->scan_mutex); 2619 2620 return rc; 2621 } 2622 2623 static void pqi_scan_start(struct Scsi_Host *shost) 2624 { 2625 struct pqi_ctrl_info *ctrl_info; 2626 2627 ctrl_info = shost_to_hba(shost); 2628 2629 pqi_scan_scsi_devices(ctrl_info); 2630 } 2631 2632 /* Returns TRUE if scan is finished. */ 2633 2634 static int pqi_scan_finished(struct Scsi_Host *shost, 2635 unsigned long elapsed_time) 2636 { 2637 struct pqi_ctrl_info *ctrl_info; 2638 2639 ctrl_info = shost_priv(shost); 2640 2641 return !mutex_is_locked(&ctrl_info->scan_mutex); 2642 } 2643 2644 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, 2645 struct raid_map *raid_map, u64 first_block) 2646 { 2647 u32 volume_blk_size; 2648 2649 /* 2650 * Set the encryption tweak values based on logical block address. 2651 * If the block size is 512, the tweak value is equal to the LBA. 2652 * For other block sizes, tweak value is (LBA * block size) / 512. 2653 */ 2654 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2655 if (volume_blk_size != 512) 2656 first_block = (first_block * volume_blk_size) / 512; 2657 2658 encryption_info->data_encryption_key_index = 2659 get_unaligned_le16(&raid_map->data_encryption_key_index); 2660 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2661 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2662 } 2663 2664 /* 2665 * Attempt to perform RAID bypass mapping for a logical volume I/O. 
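 * RAID bypass maps an eligible logical volume request onto the underlying
 * physical drive and submits it on the AIO path, bypassing the controller
 * firmware's RAID stack. Requests that cannot be mapped return
 * PQI_RAID_BYPASS_INELIGIBLE so that the caller can fall back to the
 * normal RAID path.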
2666 */ 2667 2668 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, 2669 struct pqi_scsi_dev_raid_map_data *rmd) 2670 { 2671 bool is_supported = true; 2672 2673 switch (rmd->raid_level) { 2674 case SA_RAID_0: 2675 break; 2676 case SA_RAID_1: 2677 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2678 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) 2679 is_supported = false; 2680 break; 2681 case SA_RAID_TRIPLE: 2682 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2683 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) 2684 is_supported = false; 2685 break; 2686 case SA_RAID_5: 2687 if (rmd->is_write && (!ctrl_info->enable_r5_writes || 2688 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2689 is_supported = false; 2690 break; 2691 case SA_RAID_6: 2692 if (rmd->is_write && (!ctrl_info->enable_r6_writes || 2693 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2694 is_supported = false; 2695 break; 2696 default: 2697 is_supported = false; 2698 break; 2699 } 2700 2701 return is_supported; 2702 } 2703 2704 #define PQI_RAID_BYPASS_INELIGIBLE 1 2705 2706 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, 2707 struct pqi_scsi_dev_raid_map_data *rmd) 2708 { 2709 /* Check for valid opcode, get LBA and block count. */ 2710 switch (scmd->cmnd[0]) { 2711 case WRITE_6: 2712 rmd->is_write = true; 2713 fallthrough; 2714 case READ_6: 2715 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2716 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2717 rmd->block_cnt = (u32)scmd->cmnd[4]; 2718 if (rmd->block_cnt == 0) 2719 rmd->block_cnt = 256; 2720 break; 2721 case WRITE_10: 2722 rmd->is_write = true; 2723 fallthrough; 2724 case READ_10: 2725 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2726 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2727 break; 2728 case WRITE_12: 2729 rmd->is_write = true; 2730 fallthrough; 2731 case READ_12: 2732 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2733 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2734 break; 2735 case WRITE_16: 2736 rmd->is_write = true; 2737 fallthrough; 2738 case READ_16: 2739 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); 2740 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2741 break; 2742 default: 2743 /* Process via normal I/O path. */ 2744 return PQI_RAID_BYPASS_INELIGIBLE; 2745 } 2746 2747 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); 2748 2749 return 0; 2750 } 2751 2752 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, 2753 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) 2754 { 2755 #if BITS_PER_LONG == 32 2756 u64 tmpdiv; 2757 #endif 2758 2759 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; 2760 2761 /* Check for invalid block or wraparound. */ 2762 if (rmd->last_block >= 2763 get_unaligned_le64(&raid_map->volume_blk_cnt) || 2764 rmd->last_block < rmd->first_block) 2765 return PQI_RAID_BYPASS_INELIGIBLE; 2766 2767 rmd->data_disks_per_row = 2768 get_unaligned_le16(&raid_map->data_disks_per_row); 2769 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); 2770 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2771 2772 /* Calculate stripe information for the request. 
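 * first_row/last_row are the stripe rows touched by the request
 * (first_block / blocks_per_row); first_column/last_column are the data
 * drives within those rows (row offset / strip_size). 32-bit builds use
 * do_div() for the 64-bit divisions.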
*/ 2773 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; 2774 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2775 return PQI_RAID_BYPASS_INELIGIBLE; 2776 #if BITS_PER_LONG == 32 2777 tmpdiv = rmd->first_block; 2778 do_div(tmpdiv, rmd->blocks_per_row); 2779 rmd->first_row = tmpdiv; 2780 tmpdiv = rmd->last_block; 2781 do_div(tmpdiv, rmd->blocks_per_row); 2782 rmd->last_row = tmpdiv; 2783 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); 2784 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); 2785 tmpdiv = rmd->first_row_offset; 2786 do_div(tmpdiv, rmd->strip_size); 2787 rmd->first_column = tmpdiv; 2788 tmpdiv = rmd->last_row_offset; 2789 do_div(tmpdiv, rmd->strip_size); 2790 rmd->last_column = tmpdiv; 2791 #else 2792 rmd->first_row = rmd->first_block / rmd->blocks_per_row; 2793 rmd->last_row = rmd->last_block / rmd->blocks_per_row; 2794 rmd->first_row_offset = (u32)(rmd->first_block - 2795 (rmd->first_row * rmd->blocks_per_row)); 2796 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * 2797 rmd->blocks_per_row)); 2798 rmd->first_column = rmd->first_row_offset / rmd->strip_size; 2799 rmd->last_column = rmd->last_row_offset / rmd->strip_size; 2800 #endif 2801 2802 /* If this isn't a single row/column then give to the controller. */ 2803 if (rmd->first_row != rmd->last_row || 2804 rmd->first_column != rmd->last_column) 2805 return PQI_RAID_BYPASS_INELIGIBLE; 2806 2807 /* Proceeding with driver mapping. */ 2808 rmd->total_disks_per_row = rmd->data_disks_per_row + 2809 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2810 rmd->map_row = ((u32)(rmd->first_row >> 2811 raid_map->parity_rotation_shift)) % 2812 get_unaligned_le16(&raid_map->row_cnt); 2813 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + 2814 rmd->first_column; 2815 2816 return 0; 2817 } 2818 2819 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, 2820 struct raid_map *raid_map) 2821 { 2822 #if BITS_PER_LONG == 32 2823 u64 tmpdiv; 2824 #endif 2825 2826 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2827 return PQI_RAID_BYPASS_INELIGIBLE; 2828 2829 /* RAID 50/60 */ 2830 /* Verify first and last block are in same RAID group. */ 2831 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; 2832 #if BITS_PER_LONG == 32 2833 tmpdiv = rmd->first_block; 2834 rmd->first_group = do_div(tmpdiv, rmd->stripesize); 2835 tmpdiv = rmd->first_group; 2836 do_div(tmpdiv, rmd->blocks_per_row); 2837 rmd->first_group = tmpdiv; 2838 tmpdiv = rmd->last_block; 2839 rmd->last_group = do_div(tmpdiv, rmd->stripesize); 2840 tmpdiv = rmd->last_group; 2841 do_div(tmpdiv, rmd->blocks_per_row); 2842 rmd->last_group = tmpdiv; 2843 #else 2844 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; 2845 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; 2846 #endif 2847 if (rmd->first_group != rmd->last_group) 2848 return PQI_RAID_BYPASS_INELIGIBLE; 2849 2850 /* Verify request is in a single row of RAID 5/6. 
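 * (the row here is relative to the full RAID 50/60 stripe, i.e.
 * first_block / stripesize).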
*/ 2851 #if BITS_PER_LONG == 32 2852 tmpdiv = rmd->first_block; 2853 do_div(tmpdiv, rmd->stripesize); 2854 rmd->first_row = tmpdiv; 2855 rmd->r5or6_first_row = tmpdiv; 2856 tmpdiv = rmd->last_block; 2857 do_div(tmpdiv, rmd->stripesize); 2858 rmd->r5or6_last_row = tmpdiv; 2859 #else 2860 rmd->first_row = rmd->r5or6_first_row = 2861 rmd->first_block / rmd->stripesize; 2862 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; 2863 #endif 2864 if (rmd->r5or6_first_row != rmd->r5or6_last_row) 2865 return PQI_RAID_BYPASS_INELIGIBLE; 2866 2867 /* Verify request is in a single column. */ 2868 #if BITS_PER_LONG == 32 2869 tmpdiv = rmd->first_block; 2870 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); 2871 tmpdiv = rmd->first_row_offset; 2872 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); 2873 rmd->r5or6_first_row_offset = rmd->first_row_offset; 2874 tmpdiv = rmd->last_block; 2875 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); 2876 tmpdiv = rmd->r5or6_last_row_offset; 2877 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); 2878 tmpdiv = rmd->r5or6_first_row_offset; 2879 do_div(tmpdiv, rmd->strip_size); 2880 rmd->first_column = rmd->r5or6_first_column = tmpdiv; 2881 tmpdiv = rmd->r5or6_last_row_offset; 2882 do_div(tmpdiv, rmd->strip_size); 2883 rmd->r5or6_last_column = tmpdiv; 2884 #else 2885 rmd->first_row_offset = rmd->r5or6_first_row_offset = 2886 (u32)((rmd->first_block % rmd->stripesize) % 2887 rmd->blocks_per_row); 2888 2889 rmd->r5or6_last_row_offset = 2890 (u32)((rmd->last_block % rmd->stripesize) % 2891 rmd->blocks_per_row); 2892 2893 rmd->first_column = 2894 rmd->r5or6_first_row_offset / rmd->strip_size; 2895 rmd->r5or6_first_column = rmd->first_column; 2896 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; 2897 #endif 2898 if (rmd->r5or6_first_column != rmd->r5or6_last_column) 2899 return PQI_RAID_BYPASS_INELIGIBLE; 2900 2901 /* Request is eligible. */ 2902 rmd->map_row = 2903 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % 2904 get_unaligned_le16(&raid_map->row_cnt); 2905 2906 rmd->map_index = (rmd->first_group * 2907 (get_unaligned_le16(&raid_map->row_cnt) * 2908 rmd->total_disks_per_row)) + 2909 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; 2910 2911 if (rmd->is_write) { 2912 u32 index; 2913 2914 /* 2915 * p_parity_it_nexus and q_parity_it_nexus are pointers to the 2916 * parity entries inside the device's raid_map. 2917 * 2918 * A device's RAID map is bounded by: number of RAID disks squared. 2919 * 2920 * The devices RAID map size is checked during device 2921 * initialization. 2922 */ 2923 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); 2924 index *= rmd->total_disks_per_row; 2925 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); 2926 2927 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; 2928 if (rmd->raid_level == SA_RAID_6) { 2929 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; 2930 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; 2931 } 2932 #if BITS_PER_LONG == 32 2933 tmpdiv = rmd->first_block; 2934 do_div(tmpdiv, rmd->blocks_per_row); 2935 rmd->row = tmpdiv; 2936 #else 2937 rmd->row = rmd->first_block / rmd->blocks_per_row; 2938 #endif 2939 } 2940 2941 return 0; 2942 } 2943 2944 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) 2945 { 2946 /* Build the new CDB for the physical disk I/O. */ 2947 if (rmd->disk_block > 0xffffffff) { 2948 rmd->cdb[0] = rmd->is_write ? 
WRITE_16 : READ_16; 2949 rmd->cdb[1] = 0; 2950 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); 2951 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); 2952 rmd->cdb[14] = 0; 2953 rmd->cdb[15] = 0; 2954 rmd->cdb_length = 16; 2955 } else { 2956 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; 2957 rmd->cdb[1] = 0; 2958 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); 2959 rmd->cdb[6] = 0; 2960 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); 2961 rmd->cdb[9] = 0; 2962 rmd->cdb_length = 10; 2963 } 2964 } 2965 2966 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, 2967 struct pqi_scsi_dev_raid_map_data *rmd) 2968 { 2969 u32 index; 2970 u32 group; 2971 2972 group = rmd->map_index / rmd->data_disks_per_row; 2973 2974 index = rmd->map_index - (group * rmd->data_disks_per_row); 2975 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; 2976 index += rmd->data_disks_per_row; 2977 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; 2978 if (rmd->layout_map_count > 2) { 2979 index += rmd->data_disks_per_row; 2980 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; 2981 } 2982 2983 rmd->num_it_nexus_entries = rmd->layout_map_count; 2984 } 2985 2986 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2987 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2988 struct pqi_queue_group *queue_group) 2989 { 2990 int rc; 2991 struct raid_map *raid_map; 2992 u32 group; 2993 u32 next_bypass_group; 2994 struct pqi_encryption_info *encryption_info_ptr; 2995 struct pqi_encryption_info encryption_info; 2996 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 2997 2998 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 2999 if (rc) 3000 return PQI_RAID_BYPASS_INELIGIBLE; 3001 3002 rmd.raid_level = device->raid_level; 3003 3004 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) 3005 return PQI_RAID_BYPASS_INELIGIBLE; 3006 3007 if (unlikely(rmd.block_cnt == 0)) 3008 return PQI_RAID_BYPASS_INELIGIBLE; 3009 3010 raid_map = device->raid_map; 3011 3012 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); 3013 if (rc) 3014 return PQI_RAID_BYPASS_INELIGIBLE; 3015 3016 if (device->raid_level == SA_RAID_1 || 3017 device->raid_level == SA_RAID_TRIPLE) { 3018 if (rmd.is_write) { 3019 pqi_calc_aio_r1_nexus(raid_map, &rmd); 3020 } else { 3021 group = device->next_bypass_group[rmd.map_index]; 3022 next_bypass_group = group + 1; 3023 if (next_bypass_group >= rmd.layout_map_count) 3024 next_bypass_group = 0; 3025 device->next_bypass_group[rmd.map_index] = next_bypass_group; 3026 rmd.map_index += group * rmd.data_disks_per_row; 3027 } 3028 } else if ((device->raid_level == SA_RAID_5 || 3029 device->raid_level == SA_RAID_6) && 3030 (rmd.layout_map_count > 1 || rmd.is_write)) { 3031 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); 3032 if (rc) 3033 return PQI_RAID_BYPASS_INELIGIBLE; 3034 } 3035 3036 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) 3037 return PQI_RAID_BYPASS_INELIGIBLE; 3038 3039 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; 3040 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 3041 rmd.first_row * rmd.strip_size + 3042 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); 3043 rmd.disk_block_cnt = rmd.block_cnt; 3044 3045 /* Handle differing logical/physical block sizes. 
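 * phys_blk_shift converts volume blocks into physical drive blocks when
 * the volume block size is a power-of-two multiple of the drive block
 * size (e.g. 4K volume blocks on a 512-byte drive give a shift of 3).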
*/ 3046 if (raid_map->phys_blk_shift) { 3047 rmd.disk_block <<= raid_map->phys_blk_shift; 3048 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; 3049 } 3050 3051 if (unlikely(rmd.disk_block_cnt > 0xffff)) 3052 return PQI_RAID_BYPASS_INELIGIBLE; 3053 3054 pqi_set_aio_cdb(&rmd); 3055 3056 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { 3057 if (rmd.data_length > device->max_transfer_encrypted) 3058 return PQI_RAID_BYPASS_INELIGIBLE; 3059 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); 3060 encryption_info_ptr = &encryption_info; 3061 } else { 3062 encryption_info_ptr = NULL; 3063 } 3064 3065 if (rmd.is_write) { 3066 switch (device->raid_level) { 3067 case SA_RAID_1: 3068 case SA_RAID_TRIPLE: 3069 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, 3070 encryption_info_ptr, device, &rmd); 3071 case SA_RAID_5: 3072 case SA_RAID_6: 3073 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, 3074 encryption_info_ptr, device, &rmd); 3075 } 3076 } 3077 3078 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, 3079 rmd.cdb, rmd.cdb_length, queue_group, 3080 encryption_info_ptr, true, false); 3081 } 3082 3083 #define PQI_STATUS_IDLE 0x0 3084 3085 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 3086 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 3087 3088 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 3089 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 3090 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 3091 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 3092 #define PQI_DEVICE_STATE_ERROR 0x4 3093 3094 #define PQI_MODE_READY_TIMEOUT_SECS 30 3095 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 3096 3097 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 3098 { 3099 struct pqi_device_registers __iomem *pqi_registers; 3100 unsigned long timeout; 3101 u64 signature; 3102 u8 status; 3103 3104 pqi_registers = ctrl_info->pqi_registers; 3105 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 3106 3107 while (1) { 3108 signature = readq(&pqi_registers->signature); 3109 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 3110 sizeof(signature)) == 0) 3111 break; 3112 if (time_after(jiffies, timeout)) { 3113 dev_err(&ctrl_info->pci_dev->dev, 3114 "timed out waiting for PQI signature\n"); 3115 return -ETIMEDOUT; 3116 } 3117 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3118 } 3119 3120 while (1) { 3121 status = readb(&pqi_registers->function_and_status_code); 3122 if (status == PQI_STATUS_IDLE) 3123 break; 3124 if (time_after(jiffies, timeout)) { 3125 dev_err(&ctrl_info->pci_dev->dev, 3126 "timed out waiting for PQI IDLE\n"); 3127 return -ETIMEDOUT; 3128 } 3129 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3130 } 3131 3132 while (1) { 3133 if (readl(&pqi_registers->device_status) == 3134 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 3135 break; 3136 if (time_after(jiffies, timeout)) { 3137 dev_err(&ctrl_info->pci_dev->dev, 3138 "timed out waiting for PQI all registers ready\n"); 3139 return -ETIMEDOUT; 3140 } 3141 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3142 } 3143 3144 return 0; 3145 } 3146 3147 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 3148 { 3149 struct pqi_scsi_dev *device; 3150 3151 device = io_request->scmd->device->hostdata; 3152 device->raid_bypass_enabled = false; 3153 device->aio_enabled = false; 3154 } 3155 3156 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 3157 { 3158 struct pqi_ctrl_info *ctrl_info; 3159 struct pqi_scsi_dev *device; 3160 3161 device = 
sdev->hostdata; 3162 if (device->device_offline) 3163 return; 3164 3165 device->device_offline = true; 3166 ctrl_info = shost_to_hba(sdev->host); 3167 pqi_schedule_rescan_worker(ctrl_info); 3168 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 3169 path, ctrl_info->scsi_host->host_no, device->bus, 3170 device->target, device->lun); 3171 } 3172 3173 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 3174 { 3175 u8 scsi_status; 3176 u8 host_byte; 3177 struct scsi_cmnd *scmd; 3178 struct pqi_raid_error_info *error_info; 3179 size_t sense_data_length; 3180 int residual_count; 3181 int xfer_count; 3182 struct scsi_sense_hdr sshdr; 3183 3184 scmd = io_request->scmd; 3185 if (!scmd) 3186 return; 3187 3188 error_info = io_request->error_info; 3189 scsi_status = error_info->status; 3190 host_byte = DID_OK; 3191 3192 switch (error_info->data_out_result) { 3193 case PQI_DATA_IN_OUT_GOOD: 3194 break; 3195 case PQI_DATA_IN_OUT_UNDERFLOW: 3196 xfer_count = 3197 get_unaligned_le32(&error_info->data_out_transferred); 3198 residual_count = scsi_bufflen(scmd) - xfer_count; 3199 scsi_set_resid(scmd, residual_count); 3200 if (xfer_count < scmd->underflow) 3201 host_byte = DID_SOFT_ERROR; 3202 break; 3203 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 3204 case PQI_DATA_IN_OUT_ABORTED: 3205 host_byte = DID_ABORT; 3206 break; 3207 case PQI_DATA_IN_OUT_TIMEOUT: 3208 host_byte = DID_TIME_OUT; 3209 break; 3210 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 3211 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 3212 case PQI_DATA_IN_OUT_BUFFER_ERROR: 3213 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 3214 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 3215 case PQI_DATA_IN_OUT_ERROR: 3216 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 3217 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 3218 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 3219 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 3220 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 3221 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 3222 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 3223 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 3224 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 3225 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 3226 default: 3227 host_byte = DID_ERROR; 3228 break; 3229 } 3230 3231 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 3232 if (sense_data_length == 0) 3233 sense_data_length = 3234 get_unaligned_le16(&error_info->response_data_length); 3235 if (sense_data_length) { 3236 if (sense_data_length > sizeof(error_info->data)) 3237 sense_data_length = sizeof(error_info->data); 3238 3239 if (scsi_status == SAM_STAT_CHECK_CONDITION && 3240 scsi_normalize_sense(error_info->data, 3241 sense_data_length, &sshdr) && 3242 sshdr.sense_key == HARDWARE_ERROR && 3243 sshdr.asc == 0x3e) { 3244 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 3245 struct pqi_scsi_dev *device = scmd->device->hostdata; 3246 3247 switch (sshdr.ascq) { 3248 case 0x1: /* LOGICAL UNIT FAILURE */ 3249 if (printk_ratelimit()) 3250 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 3251 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3252 pqi_take_device_offline(scmd->device, "RAID"); 3253 host_byte = DID_NO_CONNECT; 3254 break; 3255 3256 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 3257 if (printk_ratelimit()) 3258 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 3259 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3260 break; 3261 } 3262 } 3263 3264 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3265 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3266 memcpy(scmd->sense_buffer, error_info->data, 3267 sense_data_length); 3268 } 3269 3270 if (pqi_cmd_priv(scmd)->this_residual && 3271 !pqi_is_logical_device(scmd->device->hostdata) && 3272 scsi_status == SAM_STAT_CHECK_CONDITION && 3273 host_byte == DID_OK && 3274 sense_data_length && 3275 scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && 3276 sshdr.sense_key == ILLEGAL_REQUEST && 3277 sshdr.asc == 0x26 && 3278 sshdr.ascq == 0x0) { 3279 host_byte = DID_NO_CONNECT; 3280 pqi_take_device_offline(scmd->device, "AIO"); 3281 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); 3282 } 3283 3284 scmd->result = scsi_status; 3285 set_host_byte(scmd, host_byte); 3286 } 3287 3288 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 3289 { 3290 u8 scsi_status; 3291 u8 host_byte; 3292 struct scsi_cmnd *scmd; 3293 struct pqi_aio_error_info *error_info; 3294 size_t sense_data_length; 3295 int residual_count; 3296 int xfer_count; 3297 bool device_offline; 3298 3299 scmd = io_request->scmd; 3300 error_info = io_request->error_info; 3301 host_byte = DID_OK; 3302 sense_data_length = 0; 3303 device_offline = false; 3304 3305 switch (error_info->service_response) { 3306 case PQI_AIO_SERV_RESPONSE_COMPLETE: 3307 scsi_status = error_info->status; 3308 break; 3309 case PQI_AIO_SERV_RESPONSE_FAILURE: 3310 switch (error_info->status) { 3311 case PQI_AIO_STATUS_IO_ABORTED: 3312 scsi_status = SAM_STAT_TASK_ABORTED; 3313 break; 3314 case PQI_AIO_STATUS_UNDERRUN: 3315 scsi_status = SAM_STAT_GOOD; 3316 residual_count = get_unaligned_le32( 3317 &error_info->residual_count); 3318 scsi_set_resid(scmd, residual_count); 3319 xfer_count = scsi_bufflen(scmd) - residual_count; 3320 if (xfer_count < scmd->underflow) 3321 host_byte = DID_SOFT_ERROR; 3322 break; 3323 case PQI_AIO_STATUS_OVERRUN: 3324 scsi_status = SAM_STAT_GOOD; 3325 break; 3326 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 3327 pqi_aio_path_disabled(io_request); 3328 scsi_status = SAM_STAT_GOOD; 3329 io_request->status = -EAGAIN; 3330 break; 3331 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 3332 case PQI_AIO_STATUS_INVALID_DEVICE: 3333 if (!io_request->raid_bypass) { 3334 device_offline = true; 3335 pqi_take_device_offline(scmd->device, "AIO"); 3336 host_byte = DID_NO_CONNECT; 3337 } 3338 scsi_status = SAM_STAT_CHECK_CONDITION; 3339 break; 3340 case PQI_AIO_STATUS_IO_ERROR: 3341 default: 3342 scsi_status = SAM_STAT_CHECK_CONDITION; 3343 break; 3344 } 3345 break; 3346 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 3347 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 3348 scsi_status = SAM_STAT_GOOD; 3349 break; 3350 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 3351 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 3352 default: 3353 scsi_status = SAM_STAT_CHECK_CONDITION; 3354 break; 3355 } 3356 3357 if (error_info->data_present) { 3358 sense_data_length = 3359 get_unaligned_le16(&error_info->data_length); 3360 if (sense_data_length) { 3361 if (sense_data_length > sizeof(error_info->data)) 3362 sense_data_length = sizeof(error_info->data); 3363 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3364 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3365 memcpy(scmd->sense_buffer, error_info->data, 3366 sense_data_length); 3367 } 3368 } 3369 3370 if (device_offline && sense_data_length == 0) 3371 scsi_build_sense(scmd, 0, 
HARDWARE_ERROR, 0x3e, 0x1); 3372 3373 scmd->result = scsi_status; 3374 set_host_byte(scmd, host_byte); 3375 } 3376 3377 static void pqi_process_io_error(unsigned int iu_type, 3378 struct pqi_io_request *io_request) 3379 { 3380 switch (iu_type) { 3381 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3382 pqi_process_raid_io_error(io_request); 3383 break; 3384 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3385 pqi_process_aio_io_error(io_request); 3386 break; 3387 } 3388 } 3389 3390 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, 3391 struct pqi_task_management_response *response) 3392 { 3393 int rc; 3394 3395 switch (response->response_code) { 3396 case SOP_TMF_COMPLETE: 3397 case SOP_TMF_FUNCTION_SUCCEEDED: 3398 rc = 0; 3399 break; 3400 case SOP_TMF_REJECTED: 3401 rc = -EAGAIN; 3402 break; 3403 case SOP_TMF_INCORRECT_LOGICAL_UNIT: 3404 rc = -ENODEV; 3405 break; 3406 default: 3407 rc = -EIO; 3408 break; 3409 } 3410 3411 if (rc) 3412 dev_err(&ctrl_info->pci_dev->dev, 3413 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); 3414 3415 return rc; 3416 } 3417 3418 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, 3419 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 3420 { 3421 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); 3422 } 3423 3424 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) 3425 { 3426 int num_responses; 3427 pqi_index_t oq_pi; 3428 pqi_index_t oq_ci; 3429 struct pqi_io_request *io_request; 3430 struct pqi_io_response *response; 3431 u16 request_id; 3432 3433 num_responses = 0; 3434 oq_ci = queue_group->oq_ci_copy; 3435 3436 while (1) { 3437 oq_pi = readl(queue_group->oq_pi); 3438 if (oq_pi >= ctrl_info->num_elements_per_oq) { 3439 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); 3440 dev_err(&ctrl_info->pci_dev->dev, 3441 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3442 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); 3443 return -1; 3444 } 3445 if (oq_pi == oq_ci) 3446 break; 3447 3448 num_responses++; 3449 response = queue_group->oq_element_array + 3450 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3451 3452 request_id = get_unaligned_le16(&response->request_id); 3453 if (request_id >= ctrl_info->max_io_slots) { 3454 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); 3455 dev_err(&ctrl_info->pci_dev->dev, 3456 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", 3457 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); 3458 return -1; 3459 } 3460 3461 io_request = &ctrl_info->io_request_pool[request_id]; 3462 if (atomic_read(&io_request->refcount) == 0) { 3463 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); 3464 dev_err(&ctrl_info->pci_dev->dev, 3465 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", 3466 request_id, oq_pi, oq_ci); 3467 return -1; 3468 } 3469 3470 switch (response->header.iu_type) { 3471 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 3472 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 3473 if (io_request->scmd) 3474 io_request->scmd->result = 0; 3475 fallthrough; 3476 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 3477 break; 3478 case PQI_RESPONSE_IU_VENDOR_GENERAL: 3479 io_request->status = 3480 get_unaligned_le16( 3481 &((struct pqi_vendor_general_response *)response)->status); 3482 break; 3483 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 3484 io_request->status = 
pqi_interpret_task_management_response(ctrl_info, 3485 (void *)response); 3486 break; 3487 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 3488 pqi_aio_path_disabled(io_request); 3489 io_request->status = -EAGAIN; 3490 break; 3491 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3492 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3493 io_request->error_info = ctrl_info->error_buffer + 3494 (get_unaligned_le16(&response->error_index) * 3495 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 3496 pqi_process_io_error(response->header.iu_type, io_request); 3497 break; 3498 default: 3499 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); 3500 dev_err(&ctrl_info->pci_dev->dev, 3501 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", 3502 response->header.iu_type, oq_pi, oq_ci); 3503 return -1; 3504 } 3505 3506 io_request->io_complete_callback(io_request, io_request->context); 3507 3508 /* 3509 * Note that the I/O request structure CANNOT BE TOUCHED after 3510 * returning from the I/O completion callback! 3511 */ 3512 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 3513 } 3514 3515 if (num_responses) { 3516 queue_group->oq_ci_copy = oq_ci; 3517 writel(oq_ci, queue_group->oq_ci); 3518 } 3519 3520 return num_responses; 3521 } 3522 3523 static inline unsigned int pqi_num_elements_free(unsigned int pi, 3524 unsigned int ci, unsigned int elements_in_queue) 3525 { 3526 unsigned int num_elements_used; 3527 3528 if (pi >= ci) 3529 num_elements_used = pi - ci; 3530 else 3531 num_elements_used = elements_in_queue - ci + pi; 3532 3533 return elements_in_queue - num_elements_used - 1; 3534 } 3535 3536 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3537 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3538 { 3539 pqi_index_t iq_pi; 3540 pqi_index_t iq_ci; 3541 unsigned long flags; 3542 void *next_element; 3543 struct pqi_queue_group *queue_group; 3544 3545 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3546 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3547 3548 while (1) { 3549 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3550 3551 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3552 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3553 3554 if (pqi_num_elements_free(iq_pi, iq_ci, 3555 ctrl_info->num_elements_per_iq)) 3556 break; 3557 3558 spin_unlock_irqrestore( 3559 &queue_group->submit_lock[RAID_PATH], flags); 3560 3561 if (pqi_ctrl_offline(ctrl_info)) 3562 return; 3563 } 3564 3565 next_element = queue_group->iq_element_array[RAID_PATH] + 3566 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3567 3568 memcpy(next_element, iu, iu_length); 3569 3570 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3571 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3572 3573 /* 3574 * This write notifies the controller that an IU is available to be 3575 * processed. 
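 * (the RAID path inbound queue producer index doorbell).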
3576 */ 3577 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3578 3579 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3580 } 3581 3582 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3583 struct pqi_event *event) 3584 { 3585 struct pqi_event_acknowledge_request request; 3586 3587 memset(&request, 0, sizeof(request)); 3588 3589 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3590 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3591 &request.header.iu_length); 3592 request.event_type = event->event_type; 3593 put_unaligned_le16(event->event_id, &request.event_id); 3594 put_unaligned_le32(event->additional_event_id, &request.additional_event_id); 3595 3596 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3597 } 3598 3599 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3600 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3601 3602 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3603 struct pqi_ctrl_info *ctrl_info) 3604 { 3605 u8 status; 3606 unsigned long timeout; 3607 3608 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies; 3609 3610 while (1) { 3611 status = pqi_read_soft_reset_status(ctrl_info); 3612 if (status & PQI_SOFT_RESET_INITIATE) 3613 return RESET_INITIATE_DRIVER; 3614 3615 if (status & PQI_SOFT_RESET_ABORT) 3616 return RESET_ABORT; 3617 3618 if (!sis_is_firmware_running(ctrl_info)) 3619 return RESET_NORESPONSE; 3620 3621 if (time_after(jiffies, timeout)) { 3622 dev_warn(&ctrl_info->pci_dev->dev, 3623 "timed out waiting for soft reset status\n"); 3624 return RESET_TIMEDOUT; 3625 } 3626 3627 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3628 } 3629 } 3630 3631 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) 3632 { 3633 int rc; 3634 unsigned int delay_secs; 3635 enum pqi_soft_reset_status reset_status; 3636 3637 if (ctrl_info->soft_reset_handshake_supported) 3638 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); 3639 else 3640 reset_status = RESET_INITIATE_FIRMWARE; 3641 3642 delay_secs = PQI_POST_RESET_DELAY_SECS; 3643 3644 switch (reset_status) { 3645 case RESET_TIMEDOUT: 3646 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; 3647 fallthrough; 3648 case RESET_INITIATE_DRIVER: 3649 dev_info(&ctrl_info->pci_dev->dev, 3650 "Online Firmware Activation: resetting controller\n"); 3651 sis_soft_reset(ctrl_info); 3652 fallthrough; 3653 case RESET_INITIATE_FIRMWARE: 3654 ctrl_info->pqi_mode_enabled = false; 3655 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 3656 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); 3657 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3658 pqi_ctrl_ofa_done(ctrl_info); 3659 dev_info(&ctrl_info->pci_dev->dev, 3660 "Online Firmware Activation: %s\n", 3661 rc == 0 ? 
"SUCCESS" : "FAILED"); 3662 break; 3663 case RESET_ABORT: 3664 dev_info(&ctrl_info->pci_dev->dev, 3665 "Online Firmware Activation ABORTED\n"); 3666 if (ctrl_info->soft_reset_handshake_supported) 3667 pqi_clear_soft_reset_status(ctrl_info); 3668 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3669 pqi_ctrl_ofa_done(ctrl_info); 3670 pqi_ofa_ctrl_unquiesce(ctrl_info); 3671 break; 3672 case RESET_NORESPONSE: 3673 fallthrough; 3674 default: 3675 dev_err(&ctrl_info->pci_dev->dev, 3676 "unexpected Online Firmware Activation reset status: 0x%x\n", 3677 reset_status); 3678 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3679 pqi_ctrl_ofa_done(ctrl_info); 3680 pqi_ofa_ctrl_unquiesce(ctrl_info); 3681 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); 3682 break; 3683 } 3684 } 3685 3686 static void pqi_ofa_memory_alloc_worker(struct work_struct *work) 3687 { 3688 struct pqi_ctrl_info *ctrl_info; 3689 3690 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); 3691 3692 pqi_ctrl_ofa_start(ctrl_info); 3693 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested); 3694 pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); 3695 } 3696 3697 static void pqi_ofa_quiesce_worker(struct work_struct *work) 3698 { 3699 struct pqi_ctrl_info *ctrl_info; 3700 struct pqi_event *event; 3701 3702 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); 3703 3704 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; 3705 3706 pqi_ofa_ctrl_quiesce(ctrl_info); 3707 pqi_acknowledge_event(ctrl_info, event); 3708 pqi_process_soft_reset(ctrl_info); 3709 } 3710 3711 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3712 struct pqi_event *event) 3713 { 3714 bool ack_event; 3715 3716 ack_event = true; 3717 3718 switch (event->event_id) { 3719 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 3720 dev_info(&ctrl_info->pci_dev->dev, 3721 "received Online Firmware Activation memory allocation request\n"); 3722 schedule_work(&ctrl_info->ofa_memory_alloc_work); 3723 break; 3724 case PQI_EVENT_OFA_QUIESCE: 3725 dev_info(&ctrl_info->pci_dev->dev, 3726 "received Online Firmware Activation quiesce request\n"); 3727 schedule_work(&ctrl_info->ofa_quiesce_work); 3728 ack_event = false; 3729 break; 3730 case PQI_EVENT_OFA_CANCELED: 3731 dev_info(&ctrl_info->pci_dev->dev, 3732 "received Online Firmware Activation cancel request: reason: %u\n", 3733 ctrl_info->ofa_cancel_reason); 3734 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3735 pqi_ctrl_ofa_done(ctrl_info); 3736 break; 3737 default: 3738 dev_err(&ctrl_info->pci_dev->dev, 3739 "received unknown Online Firmware Activation request: event ID: %u\n", 3740 event->event_id); 3741 break; 3742 } 3743 3744 return ack_event; 3745 } 3746 3747 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) 3748 { 3749 unsigned long flags; 3750 struct pqi_scsi_dev *device; 3751 3752 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3753 3754 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 3755 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 3756 device->rescan = true; 3757 } 3758 3759 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3760 } 3761 3762 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) 3763 { 3764 unsigned long flags; 3765 struct pqi_scsi_dev *device; 3766 3767 
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3768 3769 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 3770 if (device->raid_bypass_enabled) 3771 device->raid_bypass_enabled = false; 3772 3773 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3774 } 3775 3776 static void pqi_event_worker(struct work_struct *work) 3777 { 3778 unsigned int i; 3779 bool rescan_needed; 3780 struct pqi_ctrl_info *ctrl_info; 3781 struct pqi_event *event; 3782 bool ack_event; 3783 3784 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3785 3786 pqi_ctrl_busy(ctrl_info); 3787 pqi_wait_if_ctrl_blocked(ctrl_info); 3788 if (pqi_ctrl_offline(ctrl_info)) 3789 goto out; 3790 3791 rescan_needed = false; 3792 event = ctrl_info->events; 3793 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3794 if (event->pending) { 3795 event->pending = false; 3796 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3797 ack_event = pqi_ofa_process_event(ctrl_info, event); 3798 } else { 3799 ack_event = true; 3800 rescan_needed = true; 3801 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) 3802 pqi_mark_volumes_for_rescan(ctrl_info); 3803 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) 3804 pqi_disable_raid_bypass(ctrl_info); 3805 } 3806 if (ack_event) 3807 pqi_acknowledge_event(ctrl_info, event); 3808 } 3809 event++; 3810 } 3811 3812 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ) 3813 3814 if (rescan_needed) 3815 pqi_schedule_rescan_worker_with_delay(ctrl_info, 3816 PQI_RESCAN_WORK_FOR_EVENT_DELAY); 3817 3818 out: 3819 pqi_ctrl_unbusy(ctrl_info); 3820 } 3821 3822 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 3823 3824 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3825 { 3826 int num_interrupts; 3827 u32 heartbeat_count; 3828 struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t, 3829 heartbeat_timer); 3830 3831 pqi_check_ctrl_health(ctrl_info); 3832 if (pqi_ctrl_offline(ctrl_info)) 3833 return; 3834 3835 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3836 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3837 3838 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3839 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3840 dev_err(&ctrl_info->pci_dev->dev, 3841 "no heartbeat detected - last heartbeat count: %u\n", 3842 heartbeat_count); 3843 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); 3844 return; 3845 } 3846 } else { 3847 ctrl_info->previous_num_interrupts = num_interrupts; 3848 } 3849 3850 ctrl_info->previous_heartbeat_count = heartbeat_count; 3851 mod_timer(&ctrl_info->heartbeat_timer, 3852 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3853 } 3854 3855 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3856 { 3857 if (!ctrl_info->heartbeat_counter) 3858 return; 3859 3860 ctrl_info->previous_num_interrupts = 3861 atomic_read(&ctrl_info->num_interrupts); 3862 ctrl_info->previous_heartbeat_count = 3863 pqi_read_heartbeat_counter(ctrl_info); 3864 3865 ctrl_info->heartbeat_timer.expires = 3866 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3867 add_timer(&ctrl_info->heartbeat_timer); 3868 } 3869 3870 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3871 { 3872 timer_delete_sync(&ctrl_info->heartbeat_timer); 3873 } 3874 3875 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, 3876 struct pqi_event *event, struct pqi_event_response *response) 3877 { 3878 switch (event->event_id) { 3879 case 
PQI_EVENT_OFA_MEMORY_ALLOCATION: 3880 ctrl_info->ofa_bytes_requested = 3881 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); 3882 break; 3883 case PQI_EVENT_OFA_CANCELED: 3884 ctrl_info->ofa_cancel_reason = 3885 get_unaligned_le16(&response->data.ofa_cancelled.reason); 3886 break; 3887 } 3888 } 3889 3890 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3891 { 3892 int num_events; 3893 pqi_index_t oq_pi; 3894 pqi_index_t oq_ci; 3895 struct pqi_event_queue *event_queue; 3896 struct pqi_event_response *response; 3897 struct pqi_event *event; 3898 int event_index; 3899 3900 event_queue = &ctrl_info->event_queue; 3901 num_events = 0; 3902 oq_ci = event_queue->oq_ci_copy; 3903 3904 while (1) { 3905 oq_pi = readl(event_queue->oq_pi); 3906 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { 3907 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); 3908 dev_err(&ctrl_info->pci_dev->dev, 3909 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3910 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); 3911 return -1; 3912 } 3913 3914 if (oq_pi == oq_ci) 3915 break; 3916 3917 num_events++; 3918 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3919 3920 event_index = pqi_event_type_to_event_index(response->event_type); 3921 3922 if (event_index >= 0 && response->request_acknowledge) { 3923 event = &ctrl_info->events[event_index]; 3924 event->pending = true; 3925 event->event_type = response->event_type; 3926 event->event_id = get_unaligned_le16(&response->event_id); 3927 event->additional_event_id = 3928 get_unaligned_le32(&response->additional_event_id); 3929 if (event->event_type == PQI_EVENT_TYPE_OFA) 3930 pqi_ofa_capture_event_payload(ctrl_info, event, response); 3931 } 3932 3933 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3934 } 3935 3936 if (num_events) { 3937 event_queue->oq_ci_copy = oq_ci; 3938 writel(oq_ci, event_queue->oq_ci); 3939 schedule_work(&ctrl_info->event_work); 3940 } 3941 3942 return num_events; 3943 } 3944 3945 #define PQI_LEGACY_INTX_MASK 0x1 3946 3947 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) 3948 { 3949 u32 intx_mask; 3950 struct pqi_device_registers __iomem *pqi_registers; 3951 volatile void __iomem *register_addr; 3952 3953 pqi_registers = ctrl_info->pqi_registers; 3954 3955 if (enable_intx) 3956 register_addr = &pqi_registers->legacy_intx_mask_clear; 3957 else 3958 register_addr = &pqi_registers->legacy_intx_mask_set; 3959 3960 intx_mask = readl(register_addr); 3961 intx_mask |= PQI_LEGACY_INTX_MASK; 3962 writel(intx_mask, register_addr); 3963 } 3964 3965 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3966 enum pqi_irq_mode new_mode) 3967 { 3968 switch (ctrl_info->irq_mode) { 3969 case IRQ_MODE_MSIX: 3970 switch (new_mode) { 3971 case IRQ_MODE_MSIX: 3972 break; 3973 case IRQ_MODE_INTX: 3974 pqi_configure_legacy_intx(ctrl_info, true); 3975 sis_enable_intx(ctrl_info); 3976 break; 3977 case IRQ_MODE_NONE: 3978 break; 3979 } 3980 break; 3981 case IRQ_MODE_INTX: 3982 switch (new_mode) { 3983 case IRQ_MODE_MSIX: 3984 pqi_configure_legacy_intx(ctrl_info, false); 3985 sis_enable_msix(ctrl_info); 3986 break; 3987 case IRQ_MODE_INTX: 3988 break; 3989 case IRQ_MODE_NONE: 3990 pqi_configure_legacy_intx(ctrl_info, false); 3991 break; 3992 } 3993 break; 3994 case IRQ_MODE_NONE: 3995 switch (new_mode) { 3996 case IRQ_MODE_MSIX: 3997 sis_enable_msix(ctrl_info); 3998 break; 3999 case IRQ_MODE_INTX: 4000 
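/* none -> INTx: unmask the legacy interrupt in the PQI registers, then tell the SIS firmware to deliver interrupts via INTx. */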
pqi_configure_legacy_intx(ctrl_info, true); 4001 sis_enable_intx(ctrl_info); 4002 break; 4003 case IRQ_MODE_NONE: 4004 break; 4005 } 4006 break; 4007 } 4008 4009 ctrl_info->irq_mode = new_mode; 4010 } 4011 4012 #define PQI_LEGACY_INTX_PENDING 0x1 4013 4014 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 4015 { 4016 bool valid_irq; 4017 u32 intx_status; 4018 4019 switch (ctrl_info->irq_mode) { 4020 case IRQ_MODE_MSIX: 4021 valid_irq = true; 4022 break; 4023 case IRQ_MODE_INTX: 4024 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); 4025 if (intx_status & PQI_LEGACY_INTX_PENDING) 4026 valid_irq = true; 4027 else 4028 valid_irq = false; 4029 break; 4030 case IRQ_MODE_NONE: 4031 default: 4032 valid_irq = false; 4033 break; 4034 } 4035 4036 return valid_irq; 4037 } 4038 4039 static irqreturn_t pqi_irq_handler(int irq, void *data) 4040 { 4041 struct pqi_ctrl_info *ctrl_info; 4042 struct pqi_queue_group *queue_group; 4043 int num_io_responses_handled; 4044 int num_events_handled; 4045 4046 queue_group = data; 4047 ctrl_info = queue_group->ctrl_info; 4048 4049 if (!pqi_is_valid_irq(ctrl_info)) 4050 return IRQ_NONE; 4051 4052 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 4053 if (num_io_responses_handled < 0) 4054 goto out; 4055 4056 if (irq == ctrl_info->event_irq) { 4057 num_events_handled = pqi_process_event_intr(ctrl_info); 4058 if (num_events_handled < 0) 4059 goto out; 4060 } else { 4061 num_events_handled = 0; 4062 } 4063 4064 if (num_io_responses_handled + num_events_handled > 0) 4065 atomic_inc(&ctrl_info->num_interrupts); 4066 4067 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 4068 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 4069 4070 out: 4071 return IRQ_HANDLED; 4072 } 4073 4074 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 4075 { 4076 struct pci_dev *pci_dev = ctrl_info->pci_dev; 4077 int i; 4078 int rc; 4079 4080 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 4081 4082 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 4083 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 4084 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 4085 if (rc) { 4086 dev_err(&pci_dev->dev, 4087 "irq %u init failed with error %d\n", 4088 pci_irq_vector(pci_dev, i), rc); 4089 return rc; 4090 } 4091 ctrl_info->num_msix_vectors_initialized++; 4092 } 4093 4094 return 0; 4095 } 4096 4097 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 4098 { 4099 int i; 4100 4101 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 4102 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 4103 &ctrl_info->queue_groups[i]); 4104 4105 ctrl_info->num_msix_vectors_initialized = 0; 4106 } 4107 4108 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4109 { 4110 int num_vectors_enabled; 4111 unsigned int flags = PCI_IRQ_MSIX; 4112 4113 if (!pqi_disable_managed_interrupts) 4114 flags |= PCI_IRQ_AFFINITY; 4115 4116 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 4117 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 4118 flags); 4119 if (num_vectors_enabled < 0) { 4120 dev_err(&ctrl_info->pci_dev->dev, 4121 "MSI-X init failed with error %d\n", 4122 num_vectors_enabled); 4123 return num_vectors_enabled; 4124 } 4125 4126 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 4127 ctrl_info->irq_mode = IRQ_MODE_MSIX; 4128 return 0; 4129 } 4130 4131 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4132 { 4133 if 
(ctrl_info->num_msix_vectors_enabled) { 4134 pci_free_irq_vectors(ctrl_info->pci_dev); 4135 ctrl_info->num_msix_vectors_enabled = 0; 4136 } 4137 } 4138 4139 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 4140 { 4141 unsigned int i; 4142 size_t alloc_length; 4143 size_t element_array_length_per_iq; 4144 size_t element_array_length_per_oq; 4145 void *element_array; 4146 void __iomem *next_queue_index; 4147 void *aligned_pointer; 4148 unsigned int num_inbound_queues; 4149 unsigned int num_outbound_queues; 4150 unsigned int num_queue_indexes; 4151 struct pqi_queue_group *queue_group; 4152 4153 element_array_length_per_iq = 4154 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 4155 ctrl_info->num_elements_per_iq; 4156 element_array_length_per_oq = 4157 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 4158 ctrl_info->num_elements_per_oq; 4159 num_inbound_queues = ctrl_info->num_queue_groups * 2; 4160 num_outbound_queues = ctrl_info->num_queue_groups; 4161 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 4162 4163 aligned_pointer = NULL; 4164 4165 for (i = 0; i < num_inbound_queues; i++) { 4166 aligned_pointer = PTR_ALIGN(aligned_pointer, 4167 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4168 aligned_pointer += element_array_length_per_iq; 4169 } 4170 4171 for (i = 0; i < num_outbound_queues; i++) { 4172 aligned_pointer = PTR_ALIGN(aligned_pointer, 4173 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4174 aligned_pointer += element_array_length_per_oq; 4175 } 4176 4177 aligned_pointer = PTR_ALIGN(aligned_pointer, 4178 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4179 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4180 PQI_EVENT_OQ_ELEMENT_LENGTH; 4181 4182 for (i = 0; i < num_queue_indexes; i++) { 4183 aligned_pointer = PTR_ALIGN(aligned_pointer, 4184 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4185 aligned_pointer += sizeof(pqi_index_t); 4186 } 4187 4188 alloc_length = (size_t)aligned_pointer + 4189 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4190 4191 alloc_length += PQI_EXTRA_SGL_MEMORY; 4192 4193 ctrl_info->queue_memory_base = 4194 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4195 &ctrl_info->queue_memory_base_dma_handle, 4196 GFP_KERNEL); 4197 4198 if (!ctrl_info->queue_memory_base) 4199 return -ENOMEM; 4200 4201 ctrl_info->queue_memory_length = alloc_length; 4202 4203 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 4204 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4205 4206 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4207 queue_group = &ctrl_info->queue_groups[i]; 4208 queue_group->iq_element_array[RAID_PATH] = element_array; 4209 queue_group->iq_element_array_bus_addr[RAID_PATH] = 4210 ctrl_info->queue_memory_base_dma_handle + 4211 (element_array - ctrl_info->queue_memory_base); 4212 element_array += element_array_length_per_iq; 4213 element_array = PTR_ALIGN(element_array, 4214 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4215 queue_group->iq_element_array[AIO_PATH] = element_array; 4216 queue_group->iq_element_array_bus_addr[AIO_PATH] = 4217 ctrl_info->queue_memory_base_dma_handle + 4218 (element_array - ctrl_info->queue_memory_base); 4219 element_array += element_array_length_per_iq; 4220 element_array = PTR_ALIGN(element_array, 4221 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4222 } 4223 4224 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4225 queue_group = &ctrl_info->queue_groups[i]; 4226 queue_group->oq_element_array = element_array; 4227 queue_group->oq_element_array_bus_addr = 4228 ctrl_info->queue_memory_base_dma_handle + 4229 (element_array - ctrl_info->queue_memory_base); 4230 element_array += 
element_array_length_per_oq; 4231 element_array = PTR_ALIGN(element_array, 4232 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4233 } 4234 4235 ctrl_info->event_queue.oq_element_array = element_array; 4236 ctrl_info->event_queue.oq_element_array_bus_addr = 4237 ctrl_info->queue_memory_base_dma_handle + 4238 (element_array - ctrl_info->queue_memory_base); 4239 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4240 PQI_EVENT_OQ_ELEMENT_LENGTH; 4241 4242 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 4243 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4244 4245 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4246 queue_group = &ctrl_info->queue_groups[i]; 4247 queue_group->iq_ci[RAID_PATH] = next_queue_index; 4248 queue_group->iq_ci_bus_addr[RAID_PATH] = 4249 ctrl_info->queue_memory_base_dma_handle + 4250 (next_queue_index - 4251 (void __iomem *)ctrl_info->queue_memory_base); 4252 next_queue_index += sizeof(pqi_index_t); 4253 next_queue_index = PTR_ALIGN(next_queue_index, 4254 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4255 queue_group->iq_ci[AIO_PATH] = next_queue_index; 4256 queue_group->iq_ci_bus_addr[AIO_PATH] = 4257 ctrl_info->queue_memory_base_dma_handle + 4258 (next_queue_index - 4259 (void __iomem *)ctrl_info->queue_memory_base); 4260 next_queue_index += sizeof(pqi_index_t); 4261 next_queue_index = PTR_ALIGN(next_queue_index, 4262 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4263 queue_group->oq_pi = next_queue_index; 4264 queue_group->oq_pi_bus_addr = 4265 ctrl_info->queue_memory_base_dma_handle + 4266 (next_queue_index - 4267 (void __iomem *)ctrl_info->queue_memory_base); 4268 next_queue_index += sizeof(pqi_index_t); 4269 next_queue_index = PTR_ALIGN(next_queue_index, 4270 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4271 } 4272 4273 ctrl_info->event_queue.oq_pi = next_queue_index; 4274 ctrl_info->event_queue.oq_pi_bus_addr = 4275 ctrl_info->queue_memory_base_dma_handle + 4276 (next_queue_index - 4277 (void __iomem *)ctrl_info->queue_memory_base); 4278 4279 return 0; 4280 } 4281 4282 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 4283 { 4284 unsigned int i; 4285 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4286 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4287 4288 /* 4289 * Initialize the backpointers to the controller structure in 4290 * each operational queue group structure. 4291 */ 4292 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4293 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 4294 4295 /* 4296 * Assign IDs to all operational queues. Note that the IDs 4297 * assigned to operational IQs are independent of the IDs 4298 * assigned to operational OQs. 4299 */ 4300 ctrl_info->event_queue.oq_id = next_oq_id++; 4301 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4302 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 4303 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 4304 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 4305 } 4306 4307 /* 4308 * Assign MSI-X table entry indexes to all queues. Note that the 4309 * interrupt for the event queue is shared with the first queue group. 
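 * Because vector 0 is shared, pqi_irq_handler() drains the event queue only
 * when the interrupting vector matches ctrl_info->event_irq (MSI-X vector 0).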
4310 */ 4311 ctrl_info->event_queue.int_msg_num = 0; 4312 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4313 ctrl_info->queue_groups[i].int_msg_num = i; 4314 4315 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4316 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 4317 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 4318 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 4319 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 4320 } 4321 } 4322 4323 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 4324 { 4325 size_t alloc_length; 4326 struct pqi_admin_queues_aligned *admin_queues_aligned; 4327 struct pqi_admin_queues *admin_queues; 4328 4329 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 4330 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4331 4332 ctrl_info->admin_queue_memory_base = 4333 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4334 &ctrl_info->admin_queue_memory_base_dma_handle, 4335 GFP_KERNEL); 4336 4337 if (!ctrl_info->admin_queue_memory_base) 4338 return -ENOMEM; 4339 4340 ctrl_info->admin_queue_memory_length = alloc_length; 4341 4342 admin_queues = &ctrl_info->admin_queues; 4343 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 4344 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4345 admin_queues->iq_element_array = 4346 &admin_queues_aligned->iq_element_array; 4347 admin_queues->oq_element_array = 4348 &admin_queues_aligned->oq_element_array; 4349 admin_queues->iq_ci = 4350 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; 4351 admin_queues->oq_pi = 4352 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 4353 4354 admin_queues->iq_element_array_bus_addr = 4355 ctrl_info->admin_queue_memory_base_dma_handle + 4356 (admin_queues->iq_element_array - 4357 ctrl_info->admin_queue_memory_base); 4358 admin_queues->oq_element_array_bus_addr = 4359 ctrl_info->admin_queue_memory_base_dma_handle + 4360 (admin_queues->oq_element_array - 4361 ctrl_info->admin_queue_memory_base); 4362 admin_queues->iq_ci_bus_addr = 4363 ctrl_info->admin_queue_memory_base_dma_handle + 4364 ((void __iomem *)admin_queues->iq_ci - 4365 (void __iomem *)ctrl_info->admin_queue_memory_base); 4366 admin_queues->oq_pi_bus_addr = 4367 ctrl_info->admin_queue_memory_base_dma_handle + 4368 ((void __iomem *)admin_queues->oq_pi - 4369 (void __iomem *)ctrl_info->admin_queue_memory_base); 4370 4371 return 0; 4372 } 4373 4374 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 4375 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 4376 4377 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 4378 { 4379 struct pqi_device_registers __iomem *pqi_registers; 4380 struct pqi_admin_queues *admin_queues; 4381 unsigned long timeout; 4382 u8 status; 4383 u32 reg; 4384 4385 pqi_registers = ctrl_info->pqi_registers; 4386 admin_queues = &ctrl_info->admin_queues; 4387 4388 writeq((u64)admin_queues->iq_element_array_bus_addr, 4389 &pqi_registers->admin_iq_element_array_addr); 4390 writeq((u64)admin_queues->oq_element_array_bus_addr, 4391 &pqi_registers->admin_oq_element_array_addr); 4392 writeq((u64)admin_queues->iq_ci_bus_addr, 4393 &pqi_registers->admin_iq_ci_addr); 4394 writeq((u64)admin_queues->oq_pi_bus_addr, 4395 &pqi_registers->admin_oq_pi_addr); 4396 4397 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 4398 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 4399 (admin_queues->int_msg_num << 16); 4400 writel(reg, &pqi_registers->admin_iq_num_elements); 4401 4402 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 4403 &pqi_registers->function_and_status_code); 4404 4405 
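/*
 * The controller signals completion of the admin queue pair creation by
 * returning the function and status code register to PQI_STATUS_IDLE;
 * poll for that with a bounded timeout.
 */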
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 4406 while (1) { 4407 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 4408 status = readb(&pqi_registers->function_and_status_code); 4409 if (status == PQI_STATUS_IDLE) 4410 break; 4411 if (time_after(jiffies, timeout)) 4412 return -ETIMEDOUT; 4413 } 4414 4415 /* 4416 * The offset registers are not initialized to the correct 4417 * offsets until *after* the create admin queue pair command 4418 * completes successfully. 4419 */ 4420 admin_queues->iq_pi = ctrl_info->iomem_base + 4421 PQI_DEVICE_REGISTERS_OFFSET + 4422 readq(&pqi_registers->admin_iq_pi_offset); 4423 admin_queues->oq_ci = ctrl_info->iomem_base + 4424 PQI_DEVICE_REGISTERS_OFFSET + 4425 readq(&pqi_registers->admin_oq_ci_offset); 4426 4427 return 0; 4428 } 4429 4430 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 4431 struct pqi_general_admin_request *request) 4432 { 4433 struct pqi_admin_queues *admin_queues; 4434 void *next_element; 4435 pqi_index_t iq_pi; 4436 4437 admin_queues = &ctrl_info->admin_queues; 4438 iq_pi = admin_queues->iq_pi_copy; 4439 4440 next_element = admin_queues->iq_element_array + 4441 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 4442 4443 memcpy(next_element, request, sizeof(*request)); 4444 4445 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 4446 admin_queues->iq_pi_copy = iq_pi; 4447 4448 /* 4449 * This write notifies the controller that an IU is available to be 4450 * processed. 4451 */ 4452 writel(iq_pi, admin_queues->iq_pi); 4453 } 4454 4455 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 4456 4457 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 4458 struct pqi_general_admin_response *response) 4459 { 4460 struct pqi_admin_queues *admin_queues; 4461 pqi_index_t oq_pi; 4462 pqi_index_t oq_ci; 4463 unsigned long timeout; 4464 4465 admin_queues = &ctrl_info->admin_queues; 4466 oq_ci = admin_queues->oq_ci_copy; 4467 4468 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 4469 4470 while (1) { 4471 oq_pi = readl(admin_queues->oq_pi); 4472 if (oq_pi != oq_ci) 4473 break; 4474 if (time_after(jiffies, timeout)) { 4475 dev_err(&ctrl_info->pci_dev->dev, 4476 "timed out waiting for admin response\n"); 4477 return -ETIMEDOUT; 4478 } 4479 if (!sis_is_firmware_running(ctrl_info)) 4480 return -ENXIO; 4481 usleep_range(1000, 2000); 4482 } 4483 4484 memcpy(response, admin_queues->oq_element_array + 4485 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 4486 4487 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 4488 admin_queues->oq_ci_copy = oq_ci; 4489 writel(oq_ci, admin_queues->oq_ci); 4490 4491 return 0; 4492 } 4493 4494 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 4495 struct pqi_queue_group *queue_group, enum pqi_io_path path, 4496 struct pqi_io_request *io_request) 4497 { 4498 struct pqi_io_request *next; 4499 void *next_element; 4500 pqi_index_t iq_pi; 4501 pqi_index_t iq_ci; 4502 size_t iu_length; 4503 unsigned long flags; 4504 unsigned int num_elements_needed; 4505 unsigned int num_elements_to_end_of_queue; 4506 size_t copy_count; 4507 struct pqi_iu_header *request; 4508 4509 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 4510 4511 if (io_request) { 4512 io_request->queue_group = queue_group; 4513 list_add_tail(&io_request->request_list_entry, 4514 &queue_group->request_list[path]); 4515 } 4516 4517 iq_pi = queue_group->iq_pi_copy[path]; 4518 4519 list_for_each_entry_safe(io_request, next, 4520 &queue_group->request_list[path], request_list_entry) { 4521 4522 request = 
io_request->iu; 4523 4524 iu_length = get_unaligned_le16(&request->iu_length) + 4525 PQI_REQUEST_HEADER_LENGTH; 4526 num_elements_needed = 4527 DIV_ROUND_UP(iu_length, 4528 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4529 4530 iq_ci = readl(queue_group->iq_ci[path]); 4531 4532 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 4533 ctrl_info->num_elements_per_iq)) 4534 break; 4535 4536 put_unaligned_le16(queue_group->oq_id, 4537 &request->response_queue_id); 4538 4539 next_element = queue_group->iq_element_array[path] + 4540 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4541 4542 num_elements_to_end_of_queue = 4543 ctrl_info->num_elements_per_iq - iq_pi; 4544 4545 if (num_elements_needed <= num_elements_to_end_of_queue) { 4546 memcpy(next_element, request, iu_length); 4547 } else { 4548 copy_count = num_elements_to_end_of_queue * 4549 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4550 memcpy(next_element, request, copy_count); 4551 memcpy(queue_group->iq_element_array[path], 4552 (u8 *)request + copy_count, 4553 iu_length - copy_count); 4554 } 4555 4556 iq_pi = (iq_pi + num_elements_needed) % 4557 ctrl_info->num_elements_per_iq; 4558 4559 list_del(&io_request->request_list_entry); 4560 } 4561 4562 if (iq_pi != queue_group->iq_pi_copy[path]) { 4563 queue_group->iq_pi_copy[path] = iq_pi; 4564 /* 4565 * This write notifies the controller that one or more IUs are 4566 * available to be processed. 4567 */ 4568 writel(iq_pi, queue_group->iq_pi[path]); 4569 } 4570 4571 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 4572 } 4573 4574 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 4575 4576 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 4577 struct completion *wait) 4578 { 4579 int rc; 4580 4581 while (1) { 4582 if (wait_for_completion_io_timeout(wait, 4583 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 4584 rc = 0; 4585 break; 4586 } 4587 4588 pqi_check_ctrl_health(ctrl_info); 4589 if (pqi_ctrl_offline(ctrl_info)) { 4590 rc = -ENXIO; 4591 break; 4592 } 4593 } 4594 4595 return rc; 4596 } 4597 4598 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 4599 void *context) 4600 { 4601 struct completion *waiting = context; 4602 4603 complete(waiting); 4604 } 4605 4606 static int pqi_process_raid_io_error_synchronous( 4607 struct pqi_raid_error_info *error_info) 4608 { 4609 int rc = -EIO; 4610 4611 switch (error_info->data_out_result) { 4612 case PQI_DATA_IN_OUT_GOOD: 4613 if (error_info->status == SAM_STAT_GOOD) 4614 rc = 0; 4615 break; 4616 case PQI_DATA_IN_OUT_UNDERFLOW: 4617 if (error_info->status == SAM_STAT_GOOD || 4618 error_info->status == SAM_STAT_CHECK_CONDITION) 4619 rc = 0; 4620 break; 4621 case PQI_DATA_IN_OUT_ABORTED: 4622 rc = PQI_CMD_STATUS_ABORTED; 4623 break; 4624 } 4625 4626 return rc; 4627 } 4628 4629 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) 4630 { 4631 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; 4632 } 4633 4634 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4635 struct pqi_iu_header *request, unsigned int flags, 4636 struct pqi_raid_error_info *error_info) 4637 { 4638 int rc = 0; 4639 struct pqi_io_request *io_request; 4640 size_t iu_length; 4641 DECLARE_COMPLETION_ONSTACK(wait); 4642 4643 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4644 if (down_interruptible(&ctrl_info->sync_request_sem)) 4645 return -ERESTARTSYS; 4646 } else { 4647 down(&ctrl_info->sync_request_sem); 4648 } 4649 4650 pqi_ctrl_busy(ctrl_info); 4651 /* 4652 * 
Wait for other admin queue updates such as; 4653 * config table changes, OFA memory updates, ... 4654 */ 4655 if (pqi_is_blockable_request(request)) 4656 pqi_wait_if_ctrl_blocked(ctrl_info); 4657 4658 if (pqi_ctrl_offline(ctrl_info)) { 4659 rc = -ENXIO; 4660 goto out; 4661 } 4662 4663 io_request = pqi_alloc_io_request(ctrl_info, NULL); 4664 4665 put_unaligned_le16(io_request->index, 4666 &(((struct pqi_raid_path_request *)request)->request_id)); 4667 4668 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4669 ((struct pqi_raid_path_request *)request)->error_index = 4670 ((struct pqi_raid_path_request *)request)->request_id; 4671 4672 iu_length = get_unaligned_le16(&request->iu_length) + 4673 PQI_REQUEST_HEADER_LENGTH; 4674 memcpy(io_request->iu, request, iu_length); 4675 4676 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4677 io_request->context = &wait; 4678 4679 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4680 io_request); 4681 4682 pqi_wait_for_completion_io(ctrl_info, &wait); 4683 4684 if (error_info) { 4685 if (io_request->error_info) 4686 memcpy(error_info, io_request->error_info, sizeof(*error_info)); 4687 else 4688 memset(error_info, 0, sizeof(*error_info)); 4689 } else if (rc == 0 && io_request->error_info) { 4690 rc = pqi_process_raid_io_error_synchronous(io_request->error_info); 4691 } 4692 4693 pqi_free_io_request(io_request); 4694 4695 out: 4696 pqi_ctrl_unbusy(ctrl_info); 4697 up(&ctrl_info->sync_request_sem); 4698 4699 return rc; 4700 } 4701 4702 static int pqi_validate_admin_response( 4703 struct pqi_general_admin_response *response, u8 expected_function_code) 4704 { 4705 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4706 return -EINVAL; 4707 4708 if (get_unaligned_le16(&response->header.iu_length) != 4709 PQI_GENERAL_ADMIN_IU_LENGTH) 4710 return -EINVAL; 4711 4712 if (response->function_code != expected_function_code) 4713 return -EINVAL; 4714 4715 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4716 return -EINVAL; 4717 4718 return 0; 4719 } 4720 4721 static int pqi_submit_admin_request_synchronous( 4722 struct pqi_ctrl_info *ctrl_info, 4723 struct pqi_general_admin_request *request, 4724 struct pqi_general_admin_response *response) 4725 { 4726 int rc; 4727 4728 pqi_submit_admin_request(ctrl_info, request); 4729 4730 rc = pqi_poll_for_admin_response(ctrl_info, response); 4731 4732 if (rc == 0) 4733 rc = pqi_validate_admin_response(response, request->function_code); 4734 4735 return rc; 4736 } 4737 4738 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4739 { 4740 int rc; 4741 struct pqi_general_admin_request request; 4742 struct pqi_general_admin_response response; 4743 struct pqi_device_capability *capability; 4744 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4745 4746 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4747 if (!capability) 4748 return -ENOMEM; 4749 4750 memset(&request, 0, sizeof(request)); 4751 4752 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4753 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4754 &request.header.iu_length); 4755 request.function_code = 4756 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4757 put_unaligned_le32(sizeof(*capability), 4758 &request.data.report_device_capability.buffer_length); 4759 4760 rc = pqi_map_single(ctrl_info->pci_dev, 4761 &request.data.report_device_capability.sg_descriptor, 4762 capability, sizeof(*capability), 4763 DMA_FROM_DEVICE); 4764 if (rc) 4765 goto out; 
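/*
 * Issue the REPORT DEVICE CAPABILITY admin request synchronously; the
 * capability buffer is unmapped whether or not the request succeeds.
 */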
4766 4767 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); 4768 4769 pqi_pci_unmap(ctrl_info->pci_dev, 4770 &request.data.report_device_capability.sg_descriptor, 1, 4771 DMA_FROM_DEVICE); 4772 4773 if (rc) 4774 goto out; 4775 4776 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4777 rc = -EIO; 4778 goto out; 4779 } 4780 4781 ctrl_info->max_inbound_queues = 4782 get_unaligned_le16(&capability->max_inbound_queues); 4783 ctrl_info->max_elements_per_iq = 4784 get_unaligned_le16(&capability->max_elements_per_iq); 4785 ctrl_info->max_iq_element_length = 4786 get_unaligned_le16(&capability->max_iq_element_length) 4787 * 16; 4788 ctrl_info->max_outbound_queues = 4789 get_unaligned_le16(&capability->max_outbound_queues); 4790 ctrl_info->max_elements_per_oq = 4791 get_unaligned_le16(&capability->max_elements_per_oq); 4792 ctrl_info->max_oq_element_length = 4793 get_unaligned_le16(&capability->max_oq_element_length) 4794 * 16; 4795 4796 sop_iu_layer_descriptor = 4797 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4798 4799 ctrl_info->max_inbound_iu_length_per_firmware = 4800 get_unaligned_le16( 4801 &sop_iu_layer_descriptor->max_inbound_iu_length); 4802 ctrl_info->inbound_spanning_supported = 4803 sop_iu_layer_descriptor->inbound_spanning_supported; 4804 ctrl_info->outbound_spanning_supported = 4805 sop_iu_layer_descriptor->outbound_spanning_supported; 4806 4807 out: 4808 kfree(capability); 4809 4810 return rc; 4811 } 4812 4813 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4814 { 4815 if (ctrl_info->max_iq_element_length < 4816 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4817 dev_err(&ctrl_info->pci_dev->dev, 4818 "max. inbound queue element length of %d is less than the required length of %d\n", 4819 ctrl_info->max_iq_element_length, 4820 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4821 return -EINVAL; 4822 } 4823 4824 if (ctrl_info->max_oq_element_length < 4825 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4826 dev_err(&ctrl_info->pci_dev->dev, 4827 "max. outbound queue element length of %d is less than the required length of %d\n", 4828 ctrl_info->max_oq_element_length, 4829 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4830 return -EINVAL; 4831 } 4832 4833 if (ctrl_info->max_inbound_iu_length_per_firmware < 4834 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4835 dev_err(&ctrl_info->pci_dev->dev, 4836 "max. inbound IU length of %u is less than the min. required length of %d\n", 4837 ctrl_info->max_inbound_iu_length_per_firmware, 4838 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4839 return -EINVAL; 4840 } 4841 4842 if (!ctrl_info->inbound_spanning_supported) { 4843 dev_err(&ctrl_info->pci_dev->dev, 4844 "the controller does not support inbound spanning\n"); 4845 return -EINVAL; 4846 } 4847 4848 if (ctrl_info->outbound_spanning_supported) { 4849 dev_err(&ctrl_info->pci_dev->dev, 4850 "the controller supports outbound spanning but this driver does not\n"); 4851 return -EINVAL; 4852 } 4853 4854 return 0; 4855 } 4856 4857 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4858 { 4859 int rc; 4860 struct pqi_event_queue *event_queue; 4861 struct pqi_general_admin_request request; 4862 struct pqi_general_admin_response response; 4863 4864 event_queue = &ctrl_info->event_queue; 4865 4866 /* 4867 * Create OQ (Outbound Queue - device to host queue) to dedicate 4868 * to events. 
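 * The event OQ is tied to MSI-X vector 0 (int_msg_num 0) and holds
 * PQI_NUM_EVENT_QUEUE_ELEMENTS fixed-length elements.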
4869 */ 4870 memset(&request, 0, sizeof(request)); 4871 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4872 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4873 &request.header.iu_length); 4874 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4875 put_unaligned_le16(event_queue->oq_id, 4876 &request.data.create_operational_oq.queue_id); 4877 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4878 &request.data.create_operational_oq.element_array_addr); 4879 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4880 &request.data.create_operational_oq.pi_addr); 4881 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4882 &request.data.create_operational_oq.num_elements); 4883 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4884 &request.data.create_operational_oq.element_length); 4885 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4886 put_unaligned_le16(event_queue->int_msg_num, 4887 &request.data.create_operational_oq.int_msg_num); 4888 4889 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4890 &response); 4891 if (rc) 4892 return rc; 4893 4894 event_queue->oq_ci = ctrl_info->iomem_base + 4895 PQI_DEVICE_REGISTERS_OFFSET + 4896 get_unaligned_le64( 4897 &response.data.create_operational_oq.oq_ci_offset); 4898 4899 return 0; 4900 } 4901 4902 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4903 unsigned int group_number) 4904 { 4905 int rc; 4906 struct pqi_queue_group *queue_group; 4907 struct pqi_general_admin_request request; 4908 struct pqi_general_admin_response response; 4909 4910 queue_group = &ctrl_info->queue_groups[group_number]; 4911 4912 /* 4913 * Create IQ (Inbound Queue - host to device queue) for 4914 * RAID path. 4915 */ 4916 memset(&request, 0, sizeof(request)); 4917 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4918 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4919 &request.header.iu_length); 4920 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4921 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4922 &request.data.create_operational_iq.queue_id); 4923 put_unaligned_le64( 4924 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4925 &request.data.create_operational_iq.element_array_addr); 4926 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4927 &request.data.create_operational_iq.ci_addr); 4928 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4929 &request.data.create_operational_iq.num_elements); 4930 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4931 &request.data.create_operational_iq.element_length); 4932 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4933 4934 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4935 &response); 4936 if (rc) { 4937 dev_err(&ctrl_info->pci_dev->dev, 4938 "error creating inbound RAID queue\n"); 4939 return rc; 4940 } 4941 4942 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4943 PQI_DEVICE_REGISTERS_OFFSET + 4944 get_unaligned_le64( 4945 &response.data.create_operational_iq.iq_pi_offset); 4946 4947 /* 4948 * Create IQ (Inbound Queue - host to device queue) for 4949 * Advanced I/O (AIO) path. 
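 * It is built exactly like the RAID IQ; the CHANGE_IQ_PROPERTY request
 * issued below is what designates it as the AIO submission queue.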
4950 */ 4951 memset(&request, 0, sizeof(request)); 4952 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4953 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4954 &request.header.iu_length); 4955 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4956 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4957 &request.data.create_operational_iq.queue_id); 4958 put_unaligned_le64((u64)queue_group-> 4959 iq_element_array_bus_addr[AIO_PATH], 4960 &request.data.create_operational_iq.element_array_addr); 4961 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4962 &request.data.create_operational_iq.ci_addr); 4963 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4964 &request.data.create_operational_iq.num_elements); 4965 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4966 &request.data.create_operational_iq.element_length); 4967 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4968 4969 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4970 &response); 4971 if (rc) { 4972 dev_err(&ctrl_info->pci_dev->dev, 4973 "error creating inbound AIO queue\n"); 4974 return rc; 4975 } 4976 4977 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4978 PQI_DEVICE_REGISTERS_OFFSET + 4979 get_unaligned_le64( 4980 &response.data.create_operational_iq.iq_pi_offset); 4981 4982 /* 4983 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4984 * assumed to be for RAID path I/O unless we change the queue's 4985 * property. 4986 */ 4987 memset(&request, 0, sizeof(request)); 4988 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4989 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4990 &request.header.iu_length); 4991 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4992 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4993 &request.data.change_operational_iq_properties.queue_id); 4994 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4995 &request.data.change_operational_iq_properties.vendor_specific); 4996 4997 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4998 &response); 4999 if (rc) { 5000 dev_err(&ctrl_info->pci_dev->dev, 5001 "error changing queue property\n"); 5002 return rc; 5003 } 5004 5005 /* 5006 * Create OQ (Outbound Queue - device to host queue). 
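 * A single OQ per queue group receives completions for both of the group's
 * IQs; pqi_start_io() stamps the group's oq_id into every request's
 * response_queue_id.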
5007 */ 5008 memset(&request, 0, sizeof(request)); 5009 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 5010 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 5011 &request.header.iu_length); 5012 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 5013 put_unaligned_le16(queue_group->oq_id, 5014 &request.data.create_operational_oq.queue_id); 5015 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 5016 &request.data.create_operational_oq.element_array_addr); 5017 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 5018 &request.data.create_operational_oq.pi_addr); 5019 put_unaligned_le16(ctrl_info->num_elements_per_oq, 5020 &request.data.create_operational_oq.num_elements); 5021 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 5022 &request.data.create_operational_oq.element_length); 5023 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 5024 put_unaligned_le16(queue_group->int_msg_num, 5025 &request.data.create_operational_oq.int_msg_num); 5026 5027 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 5028 &response); 5029 if (rc) { 5030 dev_err(&ctrl_info->pci_dev->dev, 5031 "error creating outbound queue\n"); 5032 return rc; 5033 } 5034 5035 queue_group->oq_ci = ctrl_info->iomem_base + 5036 PQI_DEVICE_REGISTERS_OFFSET + 5037 get_unaligned_le64( 5038 &response.data.create_operational_oq.oq_ci_offset); 5039 5040 return 0; 5041 } 5042 5043 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 5044 { 5045 int rc; 5046 unsigned int i; 5047 5048 rc = pqi_create_event_queue(ctrl_info); 5049 if (rc) { 5050 dev_err(&ctrl_info->pci_dev->dev, 5051 "error creating event queue\n"); 5052 return rc; 5053 } 5054 5055 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5056 rc = pqi_create_queue_group(ctrl_info, i); 5057 if (rc) { 5058 dev_err(&ctrl_info->pci_dev->dev, 5059 "error creating queue group number %u/%u\n", 5060 i, ctrl_info->num_queue_groups); 5061 return rc; 5062 } 5063 } 5064 5065 return 0; 5066 } 5067 5068 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 5069 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) 5070 5071 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 5072 bool enable_events) 5073 { 5074 int rc; 5075 unsigned int i; 5076 struct pqi_event_config *event_config; 5077 struct pqi_event_descriptor *event_descriptor; 5078 struct pqi_general_management_request request; 5079 5080 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5081 GFP_KERNEL); 5082 if (!event_config) 5083 return -ENOMEM; 5084 5085 memset(&request, 0, sizeof(request)); 5086 5087 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 5088 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5089 data.report_event_configuration.sg_descriptors[1]) - 5090 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5091 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5092 &request.data.report_event_configuration.buffer_length); 5093 5094 rc = pqi_map_single(ctrl_info->pci_dev, 5095 request.data.report_event_configuration.sg_descriptors, 5096 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5097 DMA_FROM_DEVICE); 5098 if (rc) 5099 goto out; 5100 5101 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5102 5103 pqi_pci_unmap(ctrl_info->pci_dev, 5104 request.data.report_event_configuration.sg_descriptors, 1, 5105 DMA_FROM_DEVICE); 5106 5107 if (rc) 5108 goto out; 5109 5110 for (i = 0; i < event_config->num_event_descriptors; 
i++) { 5111 event_descriptor = &event_config->descriptors[i]; 5112 if (enable_events && 5113 pqi_is_supported_event(event_descriptor->event_type)) 5114 put_unaligned_le16(ctrl_info->event_queue.oq_id, 5115 &event_descriptor->oq_id); 5116 else 5117 put_unaligned_le16(0, &event_descriptor->oq_id); 5118 } 5119 5120 memset(&request, 0, sizeof(request)); 5121 5122 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 5123 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5124 data.report_event_configuration.sg_descriptors[1]) - 5125 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5126 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5127 &request.data.report_event_configuration.buffer_length); 5128 5129 rc = pqi_map_single(ctrl_info->pci_dev, 5130 request.data.report_event_configuration.sg_descriptors, 5131 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5132 DMA_TO_DEVICE); 5133 if (rc) 5134 goto out; 5135 5136 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5137 5138 pqi_pci_unmap(ctrl_info->pci_dev, 5139 request.data.report_event_configuration.sg_descriptors, 1, 5140 DMA_TO_DEVICE); 5141 5142 out: 5143 kfree(event_config); 5144 5145 return rc; 5146 } 5147 5148 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 5149 { 5150 return pqi_configure_events(ctrl_info, true); 5151 } 5152 5153 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 5154 { 5155 unsigned int i; 5156 struct device *dev; 5157 size_t sg_chain_buffer_length; 5158 struct pqi_io_request *io_request; 5159 5160 if (!ctrl_info->io_request_pool) 5161 return; 5162 5163 dev = &ctrl_info->pci_dev->dev; 5164 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5165 io_request = ctrl_info->io_request_pool; 5166 5167 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5168 kfree(io_request->iu); 5169 if (!io_request->sg_chain_buffer) 5170 break; 5171 dma_free_coherent(dev, sg_chain_buffer_length, 5172 io_request->sg_chain_buffer, 5173 io_request->sg_chain_buffer_dma_handle); 5174 io_request++; 5175 } 5176 5177 kfree(ctrl_info->io_request_pool); 5178 ctrl_info->io_request_pool = NULL; 5179 } 5180 5181 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 5182 { 5183 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 5184 ctrl_info->error_buffer_length, 5185 &ctrl_info->error_buffer_dma_handle, 5186 GFP_KERNEL); 5187 if (!ctrl_info->error_buffer) 5188 return -ENOMEM; 5189 5190 return 0; 5191 } 5192 5193 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 5194 { 5195 unsigned int i; 5196 void *sg_chain_buffer; 5197 size_t sg_chain_buffer_length; 5198 dma_addr_t sg_chain_buffer_dma_handle; 5199 struct device *dev; 5200 struct pqi_io_request *io_request; 5201 5202 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, 5203 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 5204 5205 if (!ctrl_info->io_request_pool) { 5206 dev_err(&ctrl_info->pci_dev->dev, 5207 "failed to allocate I/O request pool\n"); 5208 goto error; 5209 } 5210 5211 dev = &ctrl_info->pci_dev->dev; 5212 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5213 io_request = ctrl_info->io_request_pool; 5214 5215 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5216 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 5217 5218 if (!io_request->iu) { 5219 dev_err(&ctrl_info->pci_dev->dev, 5220 "failed to allocate IU buffers\n"); 5221 goto error; 5222 } 5223 5224 
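/*
 * Pair each I/O slot with a DMA-coherent scatter-gather chain buffer
 * (sized in pqi_calculate_io_resources()) for commands whose SG lists
 * overflow the descriptors embedded in the IU.
 */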
sg_chain_buffer = dma_alloc_coherent(dev, 5225 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 5226 GFP_KERNEL); 5227 5228 if (!sg_chain_buffer) { 5229 dev_err(&ctrl_info->pci_dev->dev, 5230 "failed to allocate PQI scatter-gather chain buffers\n"); 5231 goto error; 5232 } 5233 5234 io_request->index = i; 5235 io_request->sg_chain_buffer = sg_chain_buffer; 5236 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; 5237 io_request++; 5238 } 5239 5240 return 0; 5241 5242 error: 5243 pqi_free_all_io_requests(ctrl_info); 5244 5245 return -ENOMEM; 5246 } 5247 5248 /* 5249 * Calculate required resources that are sized based on max. outstanding 5250 * requests and max. transfer size. 5251 */ 5252 5253 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 5254 { 5255 u32 max_transfer_size; 5256 u32 max_sg_entries; 5257 5258 ctrl_info->scsi_ml_can_queue = 5259 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 5260 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 5261 5262 ctrl_info->error_buffer_length = 5263 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 5264 5265 if (is_kdump_kernel()) 5266 max_transfer_size = min(ctrl_info->max_transfer_size, 5267 PQI_MAX_TRANSFER_SIZE_KDUMP); 5268 else 5269 max_transfer_size = min(ctrl_info->max_transfer_size, 5270 PQI_MAX_TRANSFER_SIZE); 5271 5272 max_sg_entries = max_transfer_size / PAGE_SIZE; 5273 5274 /* +1 to cover when the buffer is not page-aligned. */ 5275 max_sg_entries++; 5276 5277 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 5278 5279 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 5280 5281 ctrl_info->sg_chain_buffer_length = 5282 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 5283 PQI_EXTRA_SGL_MEMORY; 5284 ctrl_info->sg_tablesize = max_sg_entries; 5285 ctrl_info->max_sectors = max_transfer_size / 512; 5286 } 5287 5288 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 5289 { 5290 int num_queue_groups; 5291 u16 num_elements_per_iq; 5292 u16 num_elements_per_oq; 5293 5294 if (is_kdump_kernel()) { 5295 num_queue_groups = 1; 5296 } else { 5297 int num_cpus; 5298 int max_queue_groups; 5299 5300 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 5301 ctrl_info->max_outbound_queues - 1); 5302 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 5303 5304 num_cpus = num_online_cpus(); 5305 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 5306 num_queue_groups = min(num_queue_groups, max_queue_groups); 5307 } 5308 5309 ctrl_info->num_queue_groups = num_queue_groups; 5310 5311 /* 5312 * Make sure that the max. inbound IU length is an even multiple 5313 * of our inbound element length. 5314 */ 5315 ctrl_info->max_inbound_iu_length = 5316 (ctrl_info->max_inbound_iu_length_per_firmware / 5317 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 5318 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 5319 5320 num_elements_per_iq = 5321 (ctrl_info->max_inbound_iu_length / 5322 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 5323 5324 /* Add one because one element in each queue is unusable. 
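 * With circular producer/consumer indexes, a completely full queue would be
 * indistinguishable from an empty one, so one element is always left unused.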
*/ 5325 num_elements_per_iq++; 5326 5327 num_elements_per_iq = min(num_elements_per_iq, 5328 ctrl_info->max_elements_per_iq); 5329 5330 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 5331 num_elements_per_oq = min(num_elements_per_oq, 5332 ctrl_info->max_elements_per_oq); 5333 5334 ctrl_info->num_elements_per_iq = num_elements_per_iq; 5335 ctrl_info->num_elements_per_oq = num_elements_per_oq; 5336 5337 ctrl_info->max_sg_per_iu = 5338 ((ctrl_info->max_inbound_iu_length - 5339 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5340 sizeof(struct pqi_sg_descriptor)) + 5341 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 5342 5343 ctrl_info->max_sg_per_r56_iu = 5344 ((ctrl_info->max_inbound_iu_length - 5345 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5346 sizeof(struct pqi_sg_descriptor)) + 5347 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; 5348 } 5349 5350 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, 5351 struct scatterlist *sg) 5352 { 5353 u64 address = (u64)sg_dma_address(sg); 5354 unsigned int length = sg_dma_len(sg); 5355 5356 put_unaligned_le64(address, &sg_descriptor->address); 5357 put_unaligned_le32(length, &sg_descriptor->length); 5358 put_unaligned_le32(0, &sg_descriptor->flags); 5359 } 5360 5361 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, 5362 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, 5363 int max_sg_per_iu, bool *chained) 5364 { 5365 int i; 5366 unsigned int num_sg_in_iu; 5367 5368 *chained = false; 5369 i = 0; 5370 num_sg_in_iu = 0; 5371 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ 5372 5373 while (1) { 5374 pqi_set_sg_descriptor(sg_descriptor, sg); 5375 if (!*chained) 5376 num_sg_in_iu++; 5377 i++; 5378 if (i == sg_count) 5379 break; 5380 sg_descriptor++; 5381 if (i == max_sg_per_iu) { 5382 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, 5383 &sg_descriptor->address); 5384 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), 5385 &sg_descriptor->length); 5386 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); 5387 *chained = true; 5388 num_sg_in_iu++; 5389 sg_descriptor = io_request->sg_chain_buffer; 5390 } 5391 sg = sg_next(sg); 5392 } 5393 5394 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 5395 5396 return num_sg_in_iu; 5397 } 5398 5399 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 5400 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 5401 struct pqi_io_request *io_request) 5402 { 5403 u16 iu_length; 5404 int sg_count; 5405 bool chained; 5406 unsigned int num_sg_in_iu; 5407 struct scatterlist *sg; 5408 struct pqi_sg_descriptor *sg_descriptor; 5409 5410 sg_count = scsi_dma_map(scmd); 5411 if (sg_count < 0) 5412 return sg_count; 5413 5414 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5415 PQI_REQUEST_HEADER_LENGTH; 5416 5417 if (sg_count == 0) 5418 goto out; 5419 5420 sg = scsi_sglist(scmd); 5421 sg_descriptor = request->sg_descriptors; 5422 5423 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5424 ctrl_info->max_sg_per_iu, &chained); 5425 5426 request->partial = chained; 5427 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5428 5429 out: 5430 put_unaligned_le16(iu_length, &request->header.iu_length); 5431 5432 return 0; 5433 } 5434 5435 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, 5436 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, 5437 struct pqi_io_request *io_request) 5438 { 5439 u16 iu_length; 5440 int 
sg_count; 5441 bool chained; 5442 unsigned int num_sg_in_iu; 5443 struct scatterlist *sg; 5444 struct pqi_sg_descriptor *sg_descriptor; 5445 5446 sg_count = scsi_dma_map(scmd); 5447 if (sg_count < 0) 5448 return sg_count; 5449 5450 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - 5451 PQI_REQUEST_HEADER_LENGTH; 5452 num_sg_in_iu = 0; 5453 5454 if (sg_count == 0) 5455 goto out; 5456 5457 sg = scsi_sglist(scmd); 5458 sg_descriptor = request->sg_descriptors; 5459 5460 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5461 ctrl_info->max_sg_per_iu, &chained); 5462 5463 request->partial = chained; 5464 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5465 5466 out: 5467 put_unaligned_le16(iu_length, &request->header.iu_length); 5468 request->num_sg_descriptors = num_sg_in_iu; 5469 5470 return 0; 5471 } 5472 5473 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, 5474 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, 5475 struct pqi_io_request *io_request) 5476 { 5477 u16 iu_length; 5478 int sg_count; 5479 bool chained; 5480 unsigned int num_sg_in_iu; 5481 struct scatterlist *sg; 5482 struct pqi_sg_descriptor *sg_descriptor; 5483 5484 sg_count = scsi_dma_map(scmd); 5485 if (sg_count < 0) 5486 return sg_count; 5487 5488 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - 5489 PQI_REQUEST_HEADER_LENGTH; 5490 num_sg_in_iu = 0; 5491 5492 if (sg_count != 0) { 5493 sg = scsi_sglist(scmd); 5494 sg_descriptor = request->sg_descriptors; 5495 5496 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5497 ctrl_info->max_sg_per_r56_iu, &chained); 5498 5499 request->partial = chained; 5500 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5501 } 5502 5503 put_unaligned_le16(iu_length, &request->header.iu_length); 5504 request->num_sg_descriptors = num_sg_in_iu; 5505 5506 return 0; 5507 } 5508 5509 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 5510 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 5511 struct pqi_io_request *io_request) 5512 { 5513 u16 iu_length; 5514 int sg_count; 5515 bool chained; 5516 unsigned int num_sg_in_iu; 5517 struct scatterlist *sg; 5518 struct pqi_sg_descriptor *sg_descriptor; 5519 5520 sg_count = scsi_dma_map(scmd); 5521 if (sg_count < 0) 5522 return sg_count; 5523 5524 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 5525 PQI_REQUEST_HEADER_LENGTH; 5526 num_sg_in_iu = 0; 5527 5528 if (sg_count == 0) 5529 goto out; 5530 5531 sg = scsi_sglist(scmd); 5532 sg_descriptor = request->sg_descriptors; 5533 5534 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5535 ctrl_info->max_sg_per_iu, &chained); 5536 5537 request->partial = chained; 5538 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5539 5540 out: 5541 put_unaligned_le16(iu_length, &request->header.iu_length); 5542 request->num_sg_descriptors = num_sg_in_iu; 5543 5544 return 0; 5545 } 5546 5547 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 5548 void *context) 5549 { 5550 struct scsi_cmnd *scmd; 5551 5552 scmd = io_request->scmd; 5553 pqi_free_io_request(io_request); 5554 scsi_dma_unmap(scmd); 5555 pqi_scsi_done(scmd); 5556 } 5557 5558 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, 5559 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5560 struct pqi_queue_group *queue_group, bool io_high_prio) 5561 { 5562 int rc; 5563 size_t cdb_length; 5564 struct pqi_io_request *io_request; 5565 
struct pqi_raid_path_request *request; 5566 5567 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5568 if (!io_request) 5569 return SCSI_MLQUEUE_HOST_BUSY; 5570 5571 io_request->io_complete_callback = pqi_raid_io_complete; 5572 io_request->scmd = scmd; 5573 5574 request = io_request->iu; 5575 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); 5576 5577 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5578 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5579 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5580 request->command_priority = io_high_prio; 5581 put_unaligned_le16(io_request->index, &request->request_id); 5582 request->error_index = request->request_id; 5583 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); 5584 request->ml_device_lun_number = (u8)scmd->device->lun; 5585 5586 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 5587 memcpy(request->cdb, scmd->cmnd, cdb_length); 5588 5589 switch (cdb_length) { 5590 case 6: 5591 case 10: 5592 case 12: 5593 case 16: 5594 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5595 break; 5596 case 20: 5597 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; 5598 break; 5599 case 24: 5600 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; 5601 break; 5602 case 28: 5603 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; 5604 break; 5605 case 32: 5606 default: 5607 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; 5608 break; 5609 } 5610 5611 switch (scmd->sc_data_direction) { 5612 case DMA_FROM_DEVICE: 5613 request->data_direction = SOP_READ_FLAG; 5614 break; 5615 case DMA_TO_DEVICE: 5616 request->data_direction = SOP_WRITE_FLAG; 5617 break; 5618 case DMA_NONE: 5619 request->data_direction = SOP_NO_DIRECTION_FLAG; 5620 break; 5621 case DMA_BIDIRECTIONAL: 5622 request->data_direction = SOP_BIDIRECTIONAL; 5623 break; 5624 default: 5625 dev_err(&ctrl_info->pci_dev->dev, 5626 "unknown data direction: %d\n", 5627 scmd->sc_data_direction); 5628 break; 5629 } 5630 5631 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5632 if (rc) { 5633 pqi_free_io_request(io_request); 5634 return SCSI_MLQUEUE_HOST_BUSY; 5635 } 5636 5637 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5638 5639 return 0; 5640 } 5641 5642 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5643 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5644 struct pqi_queue_group *queue_group) 5645 { 5646 bool io_high_prio; 5647 5648 io_high_prio = pqi_is_io_high_priority(device, scmd); 5649 5650 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); 5651 } 5652 5653 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5654 { 5655 struct scsi_cmnd *scmd; 5656 struct pqi_scsi_dev *device; 5657 struct pqi_ctrl_info *ctrl_info; 5658 5659 if (!io_request->raid_bypass) 5660 return false; 5661 5662 scmd = io_request->scmd; 5663 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5664 return false; 5665 if (host_byte(scmd->result) == DID_NO_CONNECT) 5666 return false; 5667 5668 device = scmd->device->hostdata; 5669 if (pqi_device_offline(device) || pqi_device_in_remove(device)) 5670 return false; 5671 5672 ctrl_info = shost_to_hba(scmd->device->host); 5673 if (pqi_ctrl_offline(ctrl_info)) 5674 return false; 5675 5676 return true; 5677 } 5678 5679 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5680 void *context) 
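/*
 * AIO path completion: if the firmware returned -EAGAIN, or a RAID-bypass
 * request failed in a way that warrants a retry (see
 * pqi_raid_bypass_retry_needed() above), the command is returned to the SCSI
 * midlayer with DID_IMM_RETRY and this_residual is bumped so the retried
 * command is no longer eligible for the bypass path (see
 * pqi_is_bypass_eligible_request() below).
 */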
5681 { 5682 struct scsi_cmnd *scmd; 5683 5684 scmd = io_request->scmd; 5685 scsi_dma_unmap(scmd); 5686 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { 5687 set_host_byte(scmd, DID_IMM_RETRY); 5688 pqi_cmd_priv(scmd)->this_residual++; 5689 } 5690 5691 pqi_free_io_request(io_request); 5692 pqi_scsi_done(scmd); 5693 } 5694 5695 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5696 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5697 struct pqi_queue_group *queue_group) 5698 { 5699 bool io_high_prio; 5700 5701 io_high_prio = pqi_is_io_high_priority(device, scmd); 5702 5703 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5704 scmd->cmnd, scmd->cmd_len, queue_group, NULL, 5705 false, io_high_prio); 5706 } 5707 5708 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5709 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5710 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5711 struct pqi_encryption_info *encryption_info, bool raid_bypass, 5712 bool io_high_prio) 5713 { 5714 int rc; 5715 struct pqi_io_request *io_request; 5716 struct pqi_aio_path_request *request; 5717 5718 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5719 if (!io_request) 5720 return SCSI_MLQUEUE_HOST_BUSY; 5721 5722 io_request->io_complete_callback = pqi_aio_io_complete; 5723 io_request->scmd = scmd; 5724 io_request->raid_bypass = raid_bypass; 5725 5726 request = io_request->iu; 5727 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors)); 5728 5729 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5730 put_unaligned_le32(aio_handle, &request->nexus_id); 5731 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5732 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5733 request->command_priority = io_high_prio; 5734 put_unaligned_le16(io_request->index, &request->request_id); 5735 request->error_index = request->request_id; 5736 if (!raid_bypass && ctrl_info->multi_lun_device_supported) 5737 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); 5738 if (cdb_length > sizeof(request->cdb)) 5739 cdb_length = sizeof(request->cdb); 5740 request->cdb_length = cdb_length; 5741 memcpy(request->cdb, cdb, cdb_length); 5742 5743 switch (scmd->sc_data_direction) { 5744 case DMA_TO_DEVICE: 5745 request->data_direction = SOP_READ_FLAG; 5746 break; 5747 case DMA_FROM_DEVICE: 5748 request->data_direction = SOP_WRITE_FLAG; 5749 break; 5750 case DMA_NONE: 5751 request->data_direction = SOP_NO_DIRECTION_FLAG; 5752 break; 5753 case DMA_BIDIRECTIONAL: 5754 request->data_direction = SOP_BIDIRECTIONAL; 5755 break; 5756 default: 5757 dev_err(&ctrl_info->pci_dev->dev, 5758 "unknown data direction: %d\n", 5759 scmd->sc_data_direction); 5760 break; 5761 } 5762 5763 if (encryption_info) { 5764 request->encryption_enable = true; 5765 put_unaligned_le16(encryption_info->data_encryption_key_index, 5766 &request->data_encryption_key_index); 5767 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5768 &request->encrypt_tweak_lower); 5769 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5770 &request->encrypt_tweak_upper); 5771 } 5772 5773 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5774 if (rc) { 5775 pqi_free_io_request(io_request); 5776 return SCSI_MLQUEUE_HOST_BUSY; 5777 } 5778 5779 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5780 5781 return 0; 5782 } 5783 5784 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, 5785 struct 
scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5786 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5787 struct pqi_scsi_dev_raid_map_data *rmd) 5788 { 5789 int rc; 5790 struct pqi_io_request *io_request; 5791 struct pqi_aio_r1_path_request *r1_request; 5792 5793 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5794 if (!io_request) 5795 return SCSI_MLQUEUE_HOST_BUSY; 5796 5797 io_request->io_complete_callback = pqi_aio_io_complete; 5798 io_request->scmd = scmd; 5799 io_request->raid_bypass = true; 5800 5801 r1_request = io_request->iu; 5802 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); 5803 5804 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; 5805 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); 5806 r1_request->num_drives = rmd->num_it_nexus_entries; 5807 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); 5808 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); 5809 if (rmd->num_it_nexus_entries == 3) 5810 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); 5811 5812 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); 5813 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5814 put_unaligned_le16(io_request->index, &r1_request->request_id); 5815 r1_request->error_index = r1_request->request_id; 5816 if (rmd->cdb_length > sizeof(r1_request->cdb)) 5817 rmd->cdb_length = sizeof(r1_request->cdb); 5818 r1_request->cdb_length = rmd->cdb_length; 5819 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); 5820 5821 /* The direction is always write. */ 5822 r1_request->data_direction = SOP_READ_FLAG; 5823 5824 if (encryption_info) { 5825 r1_request->encryption_enable = true; 5826 put_unaligned_le16(encryption_info->data_encryption_key_index, 5827 &r1_request->data_encryption_key_index); 5828 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5829 &r1_request->encrypt_tweak_lower); 5830 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5831 &r1_request->encrypt_tweak_upper); 5832 } 5833 5834 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); 5835 if (rc) { 5836 pqi_free_io_request(io_request); 5837 return SCSI_MLQUEUE_HOST_BUSY; 5838 } 5839 5840 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5841 5842 return 0; 5843 } 5844 5845 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, 5846 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5847 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5848 struct pqi_scsi_dev_raid_map_data *rmd) 5849 { 5850 int rc; 5851 struct pqi_io_request *io_request; 5852 struct pqi_aio_r56_path_request *r56_request; 5853 5854 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5855 if (!io_request) 5856 return SCSI_MLQUEUE_HOST_BUSY; 5857 io_request->io_complete_callback = pqi_aio_io_complete; 5858 io_request->scmd = scmd; 5859 io_request->raid_bypass = true; 5860 5861 r56_request = io_request->iu; 5862 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); 5863 5864 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) 5865 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; 5866 else 5867 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; 5868 5869 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); 5870 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); 5871 
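	/*
	 * Annotation: the data drive nexus was set above from the RAID map
	 * data; the P parity nexus is always supplied, while the Q parity nexus
	 * and XOR multiplier below apply only to RAID 6.
	 */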
put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); 5872 if (rmd->raid_level == SA_RAID_6) { 5873 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); 5874 r56_request->xor_multiplier = rmd->xor_mult; 5875 } 5876 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); 5877 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5878 put_unaligned_le64(rmd->row, &r56_request->row); 5879 5880 put_unaligned_le16(io_request->index, &r56_request->request_id); 5881 r56_request->error_index = r56_request->request_id; 5882 5883 if (rmd->cdb_length > sizeof(r56_request->cdb)) 5884 rmd->cdb_length = sizeof(r56_request->cdb); 5885 r56_request->cdb_length = rmd->cdb_length; 5886 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); 5887 5888 /* The direction is always write. */ 5889 r56_request->data_direction = SOP_READ_FLAG; 5890 5891 if (encryption_info) { 5892 r56_request->encryption_enable = true; 5893 put_unaligned_le16(encryption_info->data_encryption_key_index, 5894 &r56_request->data_encryption_key_index); 5895 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5896 &r56_request->encrypt_tweak_lower); 5897 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5898 &r56_request->encrypt_tweak_upper); 5899 } 5900 5901 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); 5902 if (rc) { 5903 pqi_free_io_request(io_request); 5904 return SCSI_MLQUEUE_HOST_BUSY; 5905 } 5906 5907 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5908 5909 return 0; 5910 } 5911 5912 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5913 struct scsi_cmnd *scmd) 5914 { 5915 /* 5916 * We are setting host_tagset = 1 during init. 5917 */ 5918 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); 5919 } 5920 5921 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) 5922 { 5923 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) 5924 return false; 5925 5926 return pqi_cmd_priv(scmd)->this_residual == 0; 5927 } 5928 5929 /* 5930 * This function gets called just before we hand the completed SCSI request 5931 * back to the SML. 5932 */ 5933 5934 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5935 { 5936 struct pqi_scsi_dev *device; 5937 struct completion *wait; 5938 5939 if (!scmd->device) { 5940 set_host_byte(scmd, DID_NO_CONNECT); 5941 return; 5942 } 5943 5944 device = scmd->device->hostdata; 5945 if (!device) { 5946 set_host_byte(scmd, DID_NO_CONNECT); 5947 return; 5948 } 5949 5950 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); 5951 5952 wait = (struct completion *)xchg(&scmd->host_scribble, NULL); 5953 if (wait != PQI_NO_COMPLETION) 5954 complete(wait); 5955 } 5956 5957 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, 5958 struct scsi_cmnd *scmd) 5959 { 5960 u32 oldest_jiffies; 5961 u8 lru_index; 5962 int i; 5963 int rc; 5964 struct pqi_scsi_dev *device; 5965 struct pqi_stream_data *pqi_stream_data; 5966 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 5967 5968 if (!ctrl_info->enable_stream_detection) 5969 return false; 5970 5971 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 5972 if (rc) 5973 return false; 5974 5975 /* Check writes only. */ 5976 if (!rmd.is_write) 5977 return false; 5978 5979 device = scmd->device->hostdata; 5980 5981 /* Check for RAID 5/6 streams. 
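 * Returning true here (either because AIO RAID 5/6 writes are disabled or
 * because the write extends a tracked stream) makes the caller skip the AIO
 * bypass path, presumably so the firmware can coalesce full-stripe writes on
 * the normal RAID path.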
*/ 5982 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) 5983 return false; 5984 5985 /* 5986 * If controller does not support AIO RAID{5,6} writes, need to send 5987 * requests down non-AIO path. 5988 */ 5989 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || 5990 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) 5991 return true; 5992 5993 lru_index = 0; 5994 oldest_jiffies = INT_MAX; 5995 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { 5996 pqi_stream_data = &device->stream_data[i]; 5997 /* 5998 * Check for adjacent request or request is within 5999 * the previous request. 6000 */ 6001 if ((pqi_stream_data->next_lba && 6002 rmd.first_block >= pqi_stream_data->next_lba) && 6003 rmd.first_block <= pqi_stream_data->next_lba + 6004 rmd.block_cnt) { 6005 pqi_stream_data->next_lba = rmd.first_block + 6006 rmd.block_cnt; 6007 pqi_stream_data->last_accessed = jiffies; 6008 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++; 6009 return true; 6010 } 6011 6012 /* unused entry */ 6013 if (pqi_stream_data->last_accessed == 0) { 6014 lru_index = i; 6015 break; 6016 } 6017 6018 /* Find entry with oldest last accessed time. */ 6019 if (pqi_stream_data->last_accessed <= oldest_jiffies) { 6020 oldest_jiffies = pqi_stream_data->last_accessed; 6021 lru_index = i; 6022 } 6023 } 6024 6025 /* Set LRU entry. */ 6026 pqi_stream_data = &device->stream_data[lru_index]; 6027 pqi_stream_data->last_accessed = jiffies; 6028 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; 6029 6030 return false; 6031 } 6032 6033 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 6034 { 6035 int rc; 6036 struct pqi_ctrl_info *ctrl_info; 6037 struct pqi_scsi_dev *device; 6038 u16 hw_queue; 6039 struct pqi_queue_group *queue_group; 6040 bool raid_bypassed; 6041 u8 lun; 6042 6043 scmd->host_scribble = PQI_NO_COMPLETION; 6044 6045 device = scmd->device->hostdata; 6046 6047 if (!device) { 6048 set_host_byte(scmd, DID_NO_CONNECT); 6049 pqi_scsi_done(scmd); 6050 return 0; 6051 } 6052 6053 lun = (u8)scmd->device->lun; 6054 6055 atomic_inc(&device->scsi_cmds_outstanding[lun]); 6056 6057 ctrl_info = shost_to_hba(shost); 6058 6059 if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) { 6060 set_host_byte(scmd, DID_NO_CONNECT); 6061 pqi_scsi_done(scmd); 6062 return 0; 6063 } 6064 6065 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { 6066 rc = SCSI_MLQUEUE_HOST_BUSY; 6067 goto out; 6068 } 6069 6070 /* 6071 * This is necessary because the SML doesn't zero out this field during 6072 * error recovery. 
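 * (Presumably a stale result left over from an earlier completion could
 * otherwise be carried into this submission.)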
6073 */ 6074 scmd->result = 0; 6075 6076 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 6077 queue_group = &ctrl_info->queue_groups[hw_queue]; 6078 6079 if (pqi_is_logical_device(device)) { 6080 raid_bypassed = false; 6081 if (device->raid_bypass_enabled && 6082 pqi_is_bypass_eligible_request(scmd) && 6083 !pqi_is_parity_write_stream(ctrl_info, scmd)) { 6084 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6085 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { 6086 raid_bypassed = true; 6087 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++; 6088 } 6089 } 6090 if (!raid_bypassed) 6091 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6092 } else { 6093 if (device->aio_enabled) 6094 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6095 else 6096 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6097 } 6098 6099 out: 6100 if (rc) { 6101 scmd->host_scribble = NULL; 6102 atomic_dec(&device->scsi_cmds_outstanding[lun]); 6103 } 6104 6105 return rc; 6106 } 6107 6108 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) 6109 { 6110 unsigned int i; 6111 unsigned int path; 6112 unsigned long flags; 6113 unsigned int queued_io_count; 6114 struct pqi_queue_group *queue_group; 6115 struct pqi_io_request *io_request; 6116 6117 queued_io_count = 0; 6118 6119 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6120 queue_group = &ctrl_info->queue_groups[i]; 6121 for (path = 0; path < 2; path++) { 6122 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 6123 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) 6124 queued_io_count++; 6125 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 6126 } 6127 } 6128 6129 return queued_io_count; 6130 } 6131 6132 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) 6133 { 6134 unsigned int i; 6135 unsigned int path; 6136 unsigned int nonempty_inbound_queue_count; 6137 struct pqi_queue_group *queue_group; 6138 pqi_index_t iq_pi; 6139 pqi_index_t iq_ci; 6140 6141 nonempty_inbound_queue_count = 0; 6142 6143 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6144 queue_group = &ctrl_info->queue_groups[i]; 6145 for (path = 0; path < 2; path++) { 6146 iq_pi = queue_group->iq_pi_copy[path]; 6147 iq_ci = readl(queue_group->iq_ci[path]); 6148 if (iq_ci != iq_pi) 6149 nonempty_inbound_queue_count++; 6150 } 6151 } 6152 6153 return nonempty_inbound_queue_count; 6154 } 6155 6156 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 6157 6158 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 6159 { 6160 unsigned long start_jiffies; 6161 unsigned long warning_timeout; 6162 unsigned int queued_io_count; 6163 unsigned int nonempty_inbound_queue_count; 6164 bool displayed_warning; 6165 6166 displayed_warning = false; 6167 start_jiffies = jiffies; 6168 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6169 6170 while (1) { 6171 queued_io_count = pqi_queued_io_count(ctrl_info); 6172 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); 6173 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) 6174 break; 6175 pqi_check_ctrl_health(ctrl_info); 6176 if (pqi_ctrl_offline(ctrl_info)) 6177 return -ENXIO; 6178 if (time_after(jiffies, warning_timeout)) { 6179 dev_warn(&ctrl_info->pci_dev->dev, 6180 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty 
inbound queue count: %u)\n", 6181 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); 6182 displayed_warning = true; 6183 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6184 } 6185 usleep_range(1000, 2000); 6186 } 6187 6188 if (displayed_warning) 6189 dev_warn(&ctrl_info->pci_dev->dev, 6190 "queued I/O drained after waiting for %u seconds\n", 6191 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 6192 6193 return 0; 6194 } 6195 6196 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 6197 struct pqi_scsi_dev *device, u8 lun) 6198 { 6199 unsigned int i; 6200 unsigned int path; 6201 struct pqi_queue_group *queue_group; 6202 unsigned long flags; 6203 struct pqi_io_request *io_request; 6204 struct pqi_io_request *next; 6205 struct scsi_cmnd *scmd; 6206 struct pqi_scsi_dev *scsi_device; 6207 6208 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6209 queue_group = &ctrl_info->queue_groups[i]; 6210 6211 for (path = 0; path < 2; path++) { 6212 spin_lock_irqsave( 6213 &queue_group->submit_lock[path], flags); 6214 6215 list_for_each_entry_safe(io_request, next, 6216 &queue_group->request_list[path], 6217 request_list_entry) { 6218 6219 scmd = io_request->scmd; 6220 if (!scmd) 6221 continue; 6222 6223 scsi_device = scmd->device->hostdata; 6224 6225 list_del(&io_request->request_list_entry); 6226 if (scsi_device == device && (u8)scmd->device->lun == lun) 6227 set_host_byte(scmd, DID_RESET); 6228 else 6229 set_host_byte(scmd, DID_REQUEUE); 6230 pqi_free_io_request(io_request); 6231 scsi_dma_unmap(scmd); 6232 pqi_scsi_done(scmd); 6233 } 6234 6235 spin_unlock_irqrestore( 6236 &queue_group->submit_lock[path], flags); 6237 } 6238 } 6239 } 6240 6241 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 6242 6243 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 6244 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs) 6245 { 6246 int cmds_outstanding; 6247 unsigned long start_jiffies; 6248 unsigned long warning_timeout; 6249 unsigned long msecs_waiting; 6250 6251 start_jiffies = jiffies; 6252 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6253 6254 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { 6255 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { 6256 pqi_check_ctrl_health(ctrl_info); 6257 if (pqi_ctrl_offline(ctrl_info)) 6258 return -ENXIO; 6259 } 6260 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); 6261 if (msecs_waiting >= timeout_msecs) { 6262 dev_err(&ctrl_info->pci_dev->dev, 6263 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", 6264 ctrl_info->scsi_host->host_no, device->bus, device->target, 6265 lun, msecs_waiting / 1000, cmds_outstanding); 6266 return -ETIMEDOUT; 6267 } 6268 if (time_after(jiffies, warning_timeout)) { 6269 dev_warn(&ctrl_info->pci_dev->dev, 6270 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", 6271 ctrl_info->scsi_host->host_no, device->bus, device->target, 6272 lun, msecs_waiting / 1000, cmds_outstanding); 6273 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6274 } 6275 usleep_range(1000, 2000); 6276 } 6277 6278 return 0; 6279 } 6280 6281 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 6282 void *context) 6283 { 6284 struct completion *waiting = context; 6285 6286 complete(waiting); 6287 } 6288 6289 #define 
PQI_LUN_RESET_POLL_COMPLETION_SECS 10 6290 6291 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 6292 struct pqi_scsi_dev *device, u8 lun, struct completion *wait) 6293 { 6294 int rc; 6295 unsigned int wait_secs; 6296 int cmds_outstanding; 6297 6298 wait_secs = 0; 6299 6300 while (1) { 6301 if (wait_for_completion_io_timeout(wait, 6302 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) { 6303 rc = 0; 6304 break; 6305 } 6306 6307 pqi_check_ctrl_health(ctrl_info); 6308 if (pqi_ctrl_offline(ctrl_info)) { 6309 rc = -ENXIO; 6310 break; 6311 } 6312 6313 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; 6314 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); 6315 dev_warn(&ctrl_info->pci_dev->dev, 6316 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", 6317 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); 6318 } 6319 6320 return rc; 6321 } 6322 6323 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 6324 6325 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6326 { 6327 int rc; 6328 struct pqi_io_request *io_request; 6329 DECLARE_COMPLETION_ONSTACK(wait); 6330 struct pqi_task_management_request *request; 6331 6332 io_request = pqi_alloc_io_request(ctrl_info, NULL); 6333 io_request->io_complete_callback = pqi_lun_reset_complete; 6334 io_request->context = &wait; 6335 6336 request = io_request->iu; 6337 memset(request, 0, sizeof(*request)); 6338 6339 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 6340 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 6341 &request->header.iu_length); 6342 put_unaligned_le16(io_request->index, &request->request_id); 6343 memcpy(request->lun_number, device->scsi3addr, 6344 sizeof(request->lun_number)); 6345 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) 6346 request->ml_device_lun_number = lun; 6347 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 6348 if (ctrl_info->tmf_iu_timeout_supported) 6349 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); 6350 6351 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 6352 io_request); 6353 6354 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); 6355 if (rc == 0) 6356 rc = io_request->status; 6357 6358 pqi_free_io_request(io_request); 6359 6360 return rc; 6361 } 6362 6363 #define PQI_LUN_RESET_RETRIES 3 6364 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) 6365 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) 6366 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) 6367 6368 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6369 { 6370 int reset_rc; 6371 int wait_rc; 6372 unsigned int retries; 6373 unsigned long timeout_msecs; 6374 6375 for (retries = 0;;) { 6376 reset_rc = pqi_lun_reset(ctrl_info, device, lun); 6377 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) 6378 break; 6379 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 6380 } 6381 6382 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : 6383 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; 6384 6385 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); 6386 if (wait_rc && reset_rc == 0) 6387 reset_rc = wait_rc; 6388 6389 return reset_rc == 0 ? 
SUCCESS : FAILED; 6390 } 6391 6392 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6393 { 6394 int rc; 6395 6396 pqi_ctrl_block_requests(ctrl_info); 6397 pqi_ctrl_wait_until_quiesced(ctrl_info); 6398 pqi_fail_io_queued_for_device(ctrl_info, device, lun); 6399 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 6400 pqi_device_reset_start(device, lun); 6401 pqi_ctrl_unblock_requests(ctrl_info); 6402 if (rc) 6403 rc = FAILED; 6404 else 6405 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); 6406 pqi_device_reset_done(device, lun); 6407 6408 return rc; 6409 } 6410 6411 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) 6412 { 6413 int rc; 6414 6415 mutex_lock(&ctrl_info->lun_reset_mutex); 6416 6417 dev_err(&ctrl_info->pci_dev->dev, 6418 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", 6419 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); 6420 6421 pqi_check_ctrl_health(ctrl_info); 6422 if (pqi_ctrl_offline(ctrl_info)) 6423 rc = FAILED; 6424 else 6425 rc = pqi_device_reset(ctrl_info, device, lun); 6426 6427 dev_err(&ctrl_info->pci_dev->dev, 6428 "reset of scsi %d:%d:%d:%u: %s\n", 6429 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, 6430 rc == SUCCESS ? "SUCCESS" : "FAILED"); 6431 6432 mutex_unlock(&ctrl_info->lun_reset_mutex); 6433 6434 return rc; 6435 } 6436 6437 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 6438 { 6439 struct Scsi_Host *shost; 6440 struct pqi_ctrl_info *ctrl_info; 6441 struct pqi_scsi_dev *device; 6442 u8 scsi_opcode; 6443 6444 shost = scmd->device->host; 6445 ctrl_info = shost_to_hba(shost); 6446 device = scmd->device->hostdata; 6447 scsi_opcode = scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff; 6448 6449 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); 6450 } 6451 6452 static void pqi_tmf_worker(struct work_struct *work) 6453 { 6454 struct pqi_tmf_work *tmf_work; 6455 struct scsi_cmnd *scmd; 6456 6457 tmf_work = container_of(work, struct pqi_tmf_work, work_struct); 6458 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); 6459 6460 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); 6461 } 6462 6463 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) 6464 { 6465 struct Scsi_Host *shost; 6466 struct pqi_ctrl_info *ctrl_info; 6467 struct pqi_scsi_dev *device; 6468 struct pqi_tmf_work *tmf_work; 6469 DECLARE_COMPLETION_ONSTACK(wait); 6470 6471 shost = scmd->device->host; 6472 ctrl_info = shost_to_hba(shost); 6473 device = scmd->device->hostdata; 6474 6475 dev_err(&ctrl_info->pci_dev->dev, 6476 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", 6477 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6478 6479 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { 6480 dev_err(&ctrl_info->pci_dev->dev, 6481 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", 6482 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6483 scmd->result = DID_RESET << 16; 6484 goto out; 6485 } 6486 6487 tmf_work = &device->tmf_work[scmd->device->lun]; 6488 6489 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { 6490 tmf_work->ctrl_info = ctrl_info; 6491 tmf_work->device = device; 6492 tmf_work->lun = (u8)scmd->device->lun; 6493 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; 6494 schedule_work(&tmf_work->work_struct); 6495 } 6496 6497 wait_for_completion(&wait); 6498 6499 dev_err(&ctrl_info->pci_dev->dev, 6500 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", 6501 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6502 6503 out: 6504 6505 return SUCCESS; 6506 } 6507 6508 static int pqi_sdev_init(struct scsi_device *sdev) 6509 { 6510 struct pqi_scsi_dev *device; 6511 unsigned long flags; 6512 struct pqi_ctrl_info *ctrl_info; 6513 struct scsi_target *starget; 6514 struct sas_rphy *rphy; 6515 6516 ctrl_info = shost_to_hba(sdev->host); 6517 6518 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6519 6520 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 6521 starget = scsi_target(sdev); 6522 rphy = target_to_rphy(starget); 6523 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 6524 if (device) { 6525 if (device->target_lun_valid) { 6526 device->ignore_device = true; 6527 } else { 6528 device->target = sdev_id(sdev); 6529 device->lun = sdev->lun; 6530 device->target_lun_valid = true; 6531 } 6532 } 6533 } else { 6534 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 6535 sdev_id(sdev), sdev->lun); 6536 } 6537 6538 if (device) { 6539 sdev->hostdata = device; 6540 device->sdev = sdev; 6541 if (device->queue_depth) { 6542 device->advertised_queue_depth = device->queue_depth; 6543 scsi_change_queue_depth(sdev, 6544 device->advertised_queue_depth); 6545 } 6546 if (pqi_is_logical_device(device)) { 6547 pqi_disable_write_same(sdev); 6548 } else { 6549 sdev->allow_restart = 1; 6550 if (device->device_type == SA_DEVICE_TYPE_NVME) 6551 pqi_disable_write_same(sdev); 6552 } 6553 } 6554 6555 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6556 6557 return 0; 6558 } 6559 6560 static void 
pqi_map_queues(struct Scsi_Host *shost) 6561 { 6562 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6563 6564 if (!ctrl_info->disable_managed_interrupts) 6565 blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 6566 &ctrl_info->pci_dev->dev, 0); 6567 else 6568 blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); 6569 } 6570 6571 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) 6572 { 6573 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; 6574 } 6575 6576 static int pqi_sdev_configure(struct scsi_device *sdev, 6577 struct queue_limits *lim) 6578 { 6579 int rc = 0; 6580 struct pqi_scsi_dev *device; 6581 6582 device = sdev->hostdata; 6583 device->devtype = sdev->type; 6584 6585 if (pqi_is_tape_changer_device(device) && device->ignore_device) { 6586 rc = -ENXIO; 6587 device->ignore_device = false; 6588 } 6589 6590 return rc; 6591 } 6592 6593 static void pqi_sdev_destroy(struct scsi_device *sdev) 6594 { 6595 struct pqi_ctrl_info *ctrl_info; 6596 struct pqi_scsi_dev *device; 6597 int mutex_acquired; 6598 unsigned long flags; 6599 6600 ctrl_info = shost_to_hba(sdev->host); 6601 6602 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 6603 if (!mutex_acquired) 6604 return; 6605 6606 device = sdev->hostdata; 6607 if (!device) { 6608 mutex_unlock(&ctrl_info->scan_mutex); 6609 return; 6610 } 6611 6612 device->lun_count--; 6613 if (device->lun_count > 0) { 6614 mutex_unlock(&ctrl_info->scan_mutex); 6615 return; 6616 } 6617 6618 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6619 list_del(&device->scsi_device_list_entry); 6620 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6621 6622 mutex_unlock(&ctrl_info->scan_mutex); 6623 6624 pqi_dev_info(ctrl_info, "removed", device); 6625 pqi_free_device(device); 6626 } 6627 6628 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6629 { 6630 struct pci_dev *pci_dev; 6631 u32 subsystem_vendor; 6632 u32 subsystem_device; 6633 cciss_pci_info_struct pci_info; 6634 6635 if (!arg) 6636 return -EINVAL; 6637 6638 pci_dev = ctrl_info->pci_dev; 6639 6640 pci_info.domain = pci_domain_nr(pci_dev->bus); 6641 pci_info.bus = pci_dev->bus->number; 6642 pci_info.dev_fn = pci_dev->devfn; 6643 subsystem_vendor = pci_dev->subsystem_vendor; 6644 subsystem_device = pci_dev->subsystem_device; 6645 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; 6646 6647 if (copy_to_user(arg, &pci_info, sizeof(pci_info))) 6648 return -EFAULT; 6649 6650 return 0; 6651 } 6652 6653 static int pqi_getdrivver_ioctl(void __user *arg) 6654 { 6655 u32 version; 6656 6657 if (!arg) 6658 return -EINVAL; 6659 6660 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 6661 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 6662 6663 if (copy_to_user(arg, &version, sizeof(version))) 6664 return -EFAULT; 6665 6666 return 0; 6667 } 6668 6669 struct ciss_error_info { 6670 u8 scsi_status; 6671 int command_status; 6672 size_t sense_data_length; 6673 }; 6674 6675 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 6676 struct ciss_error_info *ciss_error_info) 6677 { 6678 int ciss_cmd_status; 6679 size_t sense_data_length; 6680 6681 switch (pqi_error_info->data_out_result) { 6682 case PQI_DATA_IN_OUT_GOOD: 6683 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 6684 break; 6685 case PQI_DATA_IN_OUT_UNDERFLOW: 6686 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 6687 break; 6688 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 6689 ciss_cmd_status 
= CISS_CMD_STATUS_DATA_OVERRUN; 6690 break; 6691 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 6692 case PQI_DATA_IN_OUT_BUFFER_ERROR: 6693 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 6694 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 6695 case PQI_DATA_IN_OUT_ERROR: 6696 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 6697 break; 6698 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 6699 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 6700 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 6701 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 6702 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 6703 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 6704 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 6705 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 6706 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 6707 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 6708 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 6709 break; 6710 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 6711 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 6712 break; 6713 case PQI_DATA_IN_OUT_ABORTED: 6714 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 6715 break; 6716 case PQI_DATA_IN_OUT_TIMEOUT: 6717 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 6718 break; 6719 default: 6720 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 6721 break; 6722 } 6723 6724 sense_data_length = 6725 get_unaligned_le16(&pqi_error_info->sense_data_length); 6726 if (sense_data_length == 0) 6727 sense_data_length = 6728 get_unaligned_le16(&pqi_error_info->response_data_length); 6729 if (sense_data_length) 6730 if (sense_data_length > sizeof(pqi_error_info->data)) 6731 sense_data_length = sizeof(pqi_error_info->data); 6732 6733 ciss_error_info->scsi_status = pqi_error_info->status; 6734 ciss_error_info->command_status = ciss_cmd_status; 6735 ciss_error_info->sense_data_length = sense_data_length; 6736 } 6737 6738 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6739 { 6740 int rc; 6741 char *kernel_buffer = NULL; 6742 u16 iu_length; 6743 size_t sense_data_length; 6744 IOCTL_Command_struct iocommand; 6745 struct pqi_raid_path_request request; 6746 struct pqi_raid_error_info pqi_error_info; 6747 struct ciss_error_info ciss_error_info; 6748 6749 if (pqi_ctrl_offline(ctrl_info)) 6750 return -ENXIO; 6751 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) 6752 return -EBUSY; 6753 if (!arg) 6754 return -EINVAL; 6755 if (!capable(CAP_SYS_RAWIO)) 6756 return -EPERM; 6757 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 6758 return -EFAULT; 6759 if (iocommand.buf_size < 1 && 6760 iocommand.Request.Type.Direction != XFER_NONE) 6761 return -EINVAL; 6762 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 6763 return -EINVAL; 6764 if (iocommand.Request.Type.Type != TYPE_CMD) 6765 return -EINVAL; 6766 6767 switch (iocommand.Request.Type.Direction) { 6768 case XFER_NONE: 6769 case XFER_WRITE: 6770 case XFER_READ: 6771 case XFER_READ | XFER_WRITE: 6772 break; 6773 default: 6774 return -EINVAL; 6775 } 6776 6777 if (iocommand.buf_size > 0) { 6778 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 6779 if (!kernel_buffer) 6780 return -ENOMEM; 6781 if (iocommand.Request.Type.Direction & XFER_WRITE) { 6782 if (copy_from_user(kernel_buffer, iocommand.buf, 6783 iocommand.buf_size)) { 6784 rc = -EFAULT; 6785 goto out; 6786 } 6787 } else { 6788 memset(kernel_buffer, 0, iocommand.buf_size); 6789 } 6790 } 6791 6792 memset(&request, 0, sizeof(request)); 6793 6794 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6795 iu_length = 
offsetof(struct pqi_raid_path_request, sg_descriptors) - 6796 PQI_REQUEST_HEADER_LENGTH; 6797 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6798 sizeof(request.lun_number)); 6799 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6800 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6801 6802 switch (iocommand.Request.Type.Direction) { 6803 case XFER_NONE: 6804 request.data_direction = SOP_NO_DIRECTION_FLAG; 6805 break; 6806 case XFER_WRITE: 6807 request.data_direction = SOP_WRITE_FLAG; 6808 break; 6809 case XFER_READ: 6810 request.data_direction = SOP_READ_FLAG; 6811 break; 6812 case XFER_READ | XFER_WRITE: 6813 request.data_direction = SOP_BIDIRECTIONAL; 6814 break; 6815 } 6816 6817 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6818 6819 if (iocommand.buf_size > 0) { 6820 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6821 6822 rc = pqi_map_single(ctrl_info->pci_dev, 6823 &request.sg_descriptors[0], kernel_buffer, 6824 iocommand.buf_size, DMA_BIDIRECTIONAL); 6825 if (rc) 6826 goto out; 6827 6828 iu_length += sizeof(request.sg_descriptors[0]); 6829 } 6830 6831 put_unaligned_le16(iu_length, &request.header.iu_length); 6832 6833 if (ctrl_info->raid_iu_timeout_supported) 6834 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6835 6836 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6837 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); 6838 6839 if (iocommand.buf_size > 0) 6840 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6841 DMA_BIDIRECTIONAL); 6842 6843 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6844 6845 if (rc == 0) { 6846 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6847 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6848 iocommand.error_info.CommandStatus = 6849 ciss_error_info.command_status; 6850 sense_data_length = ciss_error_info.sense_data_length; 6851 if (sense_data_length) { 6852 if (sense_data_length > 6853 sizeof(iocommand.error_info.SenseInfo)) 6854 sense_data_length = 6855 sizeof(iocommand.error_info.SenseInfo); 6856 memcpy(iocommand.error_info.SenseInfo, 6857 pqi_error_info.data, sense_data_length); 6858 iocommand.error_info.SenseLen = sense_data_length; 6859 } 6860 } 6861 6862 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6863 rc = -EFAULT; 6864 goto out; 6865 } 6866 6867 if (rc == 0 && iocommand.buf_size > 0 && 6868 (iocommand.Request.Type.Direction & XFER_READ)) { 6869 if (copy_to_user(iocommand.buf, kernel_buffer, 6870 iocommand.buf_size)) { 6871 rc = -EFAULT; 6872 } 6873 } 6874 6875 out: 6876 kfree(kernel_buffer); 6877 6878 return rc; 6879 } 6880 6881 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6882 void __user *arg) 6883 { 6884 int rc; 6885 struct pqi_ctrl_info *ctrl_info; 6886 6887 ctrl_info = shost_to_hba(sdev->host); 6888 6889 switch (cmd) { 6890 case CCISS_DEREGDISK: 6891 case CCISS_REGNEWDISK: 6892 case CCISS_REGNEWD: 6893 rc = pqi_scan_scsi_devices(ctrl_info); 6894 break; 6895 case CCISS_GETPCIINFO: 6896 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6897 break; 6898 case CCISS_GETDRIVVER: 6899 rc = pqi_getdrivver_ioctl(arg); 6900 break; 6901 case CCISS_PASSTHRU: 6902 rc = pqi_passthru_ioctl(ctrl_info, arg); 6903 break; 6904 default: 6905 rc = -EINVAL; 6906 break; 6907 } 6908 6909 return rc; 6910 } 6911 6912 static ssize_t pqi_firmware_version_show(struct device *dev, 6913 struct device_attribute *attr, char *buffer) 6914 { 6915 struct Scsi_Host *shost; 6916 struct 
pqi_ctrl_info *ctrl_info; 6917 6918 shost = class_to_shost(dev); 6919 ctrl_info = shost_to_hba(shost); 6920 6921 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6922 } 6923 6924 static ssize_t pqi_serial_number_show(struct device *dev, 6925 struct device_attribute *attr, char *buffer) 6926 { 6927 struct Scsi_Host *shost; 6928 struct pqi_ctrl_info *ctrl_info; 6929 6930 shost = class_to_shost(dev); 6931 ctrl_info = shost_to_hba(shost); 6932 6933 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6934 } 6935 6936 static ssize_t pqi_model_show(struct device *dev, 6937 struct device_attribute *attr, char *buffer) 6938 { 6939 struct Scsi_Host *shost; 6940 struct pqi_ctrl_info *ctrl_info; 6941 6942 shost = class_to_shost(dev); 6943 ctrl_info = shost_to_hba(shost); 6944 6945 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6946 } 6947 6948 static ssize_t pqi_vendor_show(struct device *dev, 6949 struct device_attribute *attr, char *buffer) 6950 { 6951 struct Scsi_Host *shost; 6952 struct pqi_ctrl_info *ctrl_info; 6953 6954 shost = class_to_shost(dev); 6955 ctrl_info = shost_to_hba(shost); 6956 6957 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6958 } 6959 6960 static ssize_t pqi_host_rescan_store(struct device *dev, 6961 struct device_attribute *attr, const char *buffer, size_t count) 6962 { 6963 struct Scsi_Host *shost = class_to_shost(dev); 6964 6965 pqi_scan_start(shost); 6966 6967 return count; 6968 } 6969 6970 static ssize_t pqi_lockup_action_show(struct device *dev, 6971 struct device_attribute *attr, char *buffer) 6972 { 6973 int count = 0; 6974 unsigned int i; 6975 6976 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6977 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6978 count += scnprintf(buffer + count, PAGE_SIZE - count, 6979 "[%s] ", pqi_lockup_actions[i].name); 6980 else 6981 count += scnprintf(buffer + count, PAGE_SIZE - count, 6982 "%s ", pqi_lockup_actions[i].name); 6983 } 6984 6985 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); 6986 6987 return count; 6988 } 6989 6990 static ssize_t pqi_lockup_action_store(struct device *dev, 6991 struct device_attribute *attr, const char *buffer, size_t count) 6992 { 6993 unsigned int i; 6994 char *action_name; 6995 char action_name_buffer[32]; 6996 6997 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6998 action_name = strstrip(action_name_buffer); 6999 7000 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 7001 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 7002 pqi_lockup_action = pqi_lockup_actions[i].action; 7003 return count; 7004 } 7005 } 7006 7007 return -EINVAL; 7008 } 7009 7010 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, 7011 struct device_attribute *attr, char *buffer) 7012 { 7013 struct Scsi_Host *shost = class_to_shost(dev); 7014 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7015 7016 return scnprintf(buffer, 10, "%x\n", 7017 ctrl_info->enable_stream_detection); 7018 } 7019 7020 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, 7021 struct device_attribute *attr, const char *buffer, size_t count) 7022 { 7023 struct Scsi_Host *shost = class_to_shost(dev); 7024 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7025 u8 set_stream_detection = 0; 7026 7027 if (kstrtou8(buffer, 0, &set_stream_detection)) 7028 return -EINVAL; 7029 7030 if (set_stream_detection > 0) 7031 set_stream_detection = 1; 7032 7033 
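	/*
	 * Annotation: any nonzero value written to the sysfs attribute is
	 * latched as 1 above. Illustrative usage (host number is an example):
	 *
	 *   echo 1 > /sys/class/scsi_host/hostN/enable_stream_detection
	 */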
ctrl_info->enable_stream_detection = set_stream_detection; 7034 7035 return count; 7036 } 7037 7038 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, 7039 struct device_attribute *attr, char *buffer) 7040 { 7041 struct Scsi_Host *shost = class_to_shost(dev); 7042 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7043 7044 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); 7045 } 7046 7047 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, 7048 struct device_attribute *attr, const char *buffer, size_t count) 7049 { 7050 struct Scsi_Host *shost = class_to_shost(dev); 7051 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7052 u8 set_r5_writes = 0; 7053 7054 if (kstrtou8(buffer, 0, &set_r5_writes)) 7055 return -EINVAL; 7056 7057 if (set_r5_writes > 0) 7058 set_r5_writes = 1; 7059 7060 ctrl_info->enable_r5_writes = set_r5_writes; 7061 7062 return count; 7063 } 7064 7065 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, 7066 struct device_attribute *attr, char *buffer) 7067 { 7068 struct Scsi_Host *shost = class_to_shost(dev); 7069 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7070 7071 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); 7072 } 7073 7074 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, 7075 struct device_attribute *attr, const char *buffer, size_t count) 7076 { 7077 struct Scsi_Host *shost = class_to_shost(dev); 7078 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7079 u8 set_r6_writes = 0; 7080 7081 if (kstrtou8(buffer, 0, &set_r6_writes)) 7082 return -EINVAL; 7083 7084 if (set_r6_writes > 0) 7085 set_r6_writes = 1; 7086 7087 ctrl_info->enable_r6_writes = set_r6_writes; 7088 7089 return count; 7090 } 7091 7092 static DEVICE_STRING_ATTR_RO(driver_version, 0444, 7093 DRIVER_VERSION BUILD_TIMESTAMP); 7094 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 7095 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 7096 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 7097 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 7098 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 7099 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, 7100 pqi_lockup_action_store); 7101 static DEVICE_ATTR(enable_stream_detection, 0644, 7102 pqi_host_enable_stream_detection_show, 7103 pqi_host_enable_stream_detection_store); 7104 static DEVICE_ATTR(enable_r5_writes, 0644, 7105 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); 7106 static DEVICE_ATTR(enable_r6_writes, 0644, 7107 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); 7108 7109 static struct attribute *pqi_shost_attrs[] = { 7110 &dev_attr_driver_version.attr.attr, 7111 &dev_attr_firmware_version.attr, 7112 &dev_attr_model.attr, 7113 &dev_attr_serial_number.attr, 7114 &dev_attr_vendor.attr, 7115 &dev_attr_rescan.attr, 7116 &dev_attr_lockup_action.attr, 7117 &dev_attr_enable_stream_detection.attr, 7118 &dev_attr_enable_r5_writes.attr, 7119 &dev_attr_enable_r6_writes.attr, 7120 NULL 7121 }; 7122 7123 ATTRIBUTE_GROUPS(pqi_shost); 7124 7125 static ssize_t pqi_unique_id_show(struct device *dev, 7126 struct device_attribute *attr, char *buffer) 7127 { 7128 struct pqi_ctrl_info *ctrl_info; 7129 struct scsi_device *sdev; 7130 struct pqi_scsi_dev *device; 7131 unsigned long flags; 7132 u8 unique_id[16]; 7133 7134 sdev = to_scsi_device(dev); 7135 ctrl_info = shost_to_hba(sdev->host); 7136 7137 if 
(pqi_ctrl_offline(ctrl_info)) 7138 return -ENODEV; 7139 7140 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7141 7142 device = sdev->hostdata; 7143 if (!device) { 7144 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7145 return -ENODEV; 7146 } 7147 7148 if (device->is_physical_device) 7149 memcpy(unique_id, device->wwid, sizeof(device->wwid)); 7150 else 7151 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 7152 7153 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7154 7155 return scnprintf(buffer, PAGE_SIZE, 7156 "%02X%02X%02X%02X%02X%02X%02X%02X" 7157 "%02X%02X%02X%02X%02X%02X%02X%02X\n", 7158 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 7159 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 7160 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 7161 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 7162 } 7163 7164 static ssize_t pqi_lunid_show(struct device *dev, 7165 struct device_attribute *attr, char *buffer) 7166 { 7167 struct pqi_ctrl_info *ctrl_info; 7168 struct scsi_device *sdev; 7169 struct pqi_scsi_dev *device; 7170 unsigned long flags; 7171 u8 lunid[8]; 7172 7173 sdev = to_scsi_device(dev); 7174 ctrl_info = shost_to_hba(sdev->host); 7175 7176 if (pqi_ctrl_offline(ctrl_info)) 7177 return -ENODEV; 7178 7179 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7180 7181 device = sdev->hostdata; 7182 if (!device) { 7183 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7184 return -ENODEV; 7185 } 7186 7187 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 7188 7189 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7190 7191 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 7192 } 7193 7194 #define MAX_PATHS 8 7195 7196 static ssize_t pqi_path_info_show(struct device *dev, 7197 struct device_attribute *attr, char *buf) 7198 { 7199 struct pqi_ctrl_info *ctrl_info; 7200 struct scsi_device *sdev; 7201 struct pqi_scsi_dev *device; 7202 unsigned long flags; 7203 int i; 7204 int output_len = 0; 7205 u8 box; 7206 u8 bay; 7207 u8 path_map_index; 7208 char *active; 7209 u8 phys_connector[2]; 7210 7211 sdev = to_scsi_device(dev); 7212 ctrl_info = shost_to_hba(sdev->host); 7213 7214 if (pqi_ctrl_offline(ctrl_info)) 7215 return -ENODEV; 7216 7217 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7218 7219 device = sdev->hostdata; 7220 if (!device) { 7221 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7222 return -ENODEV; 7223 } 7224 7225 bay = device->bay; 7226 for (i = 0; i < MAX_PATHS; i++) { 7227 path_map_index = 1 << i; 7228 if (i == device->active_path_index) 7229 active = "Active"; 7230 else if (device->path_map & path_map_index) 7231 active = "Inactive"; 7232 else 7233 continue; 7234 7235 output_len += scnprintf(buf + output_len, 7236 PAGE_SIZE - output_len, 7237 "[%d:%d:%d:%d] %20.20s ", 7238 ctrl_info->scsi_host->host_no, 7239 device->bus, device->target, 7240 device->lun, 7241 scsi_device_type(device->devtype)); 7242 7243 if (device->devtype == TYPE_RAID || 7244 pqi_is_logical_device(device)) 7245 goto end_buffer; 7246 7247 memcpy(&phys_connector, &device->phys_connector[i], 7248 sizeof(phys_connector)); 7249 if (phys_connector[0] < '0') 7250 phys_connector[0] = '0'; 7251 if (phys_connector[1] < '0') 7252 phys_connector[1] = '0'; 7253 7254 output_len += scnprintf(buf + output_len, 7255 PAGE_SIZE - output_len, 7256 "PORT: %.2s ", phys_connector); 7257 7258 box = device->box[i]; 7259 if (box != 0 && box != 
0xFF) 7260 output_len += scnprintf(buf + output_len, 7261 PAGE_SIZE - output_len, 7262 "BOX: %hhu ", box); 7263 7264 if ((device->devtype == TYPE_DISK || 7265 device->devtype == TYPE_ZBC) && 7266 pqi_expose_device(device)) 7267 output_len += scnprintf(buf + output_len, 7268 PAGE_SIZE - output_len, 7269 "BAY: %hhu ", bay); 7270 7271 end_buffer: 7272 output_len += scnprintf(buf + output_len, 7273 PAGE_SIZE - output_len, 7274 "%s\n", active); 7275 } 7276 7277 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7278 7279 return output_len; 7280 } 7281 7282 static ssize_t pqi_sas_address_show(struct device *dev, 7283 struct device_attribute *attr, char *buffer) 7284 { 7285 struct pqi_ctrl_info *ctrl_info; 7286 struct scsi_device *sdev; 7287 struct pqi_scsi_dev *device; 7288 unsigned long flags; 7289 u64 sas_address; 7290 7291 sdev = to_scsi_device(dev); 7292 ctrl_info = shost_to_hba(sdev->host); 7293 7294 if (pqi_ctrl_offline(ctrl_info)) 7295 return -ENODEV; 7296 7297 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7298 7299 device = sdev->hostdata; 7300 if (!device) { 7301 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7302 return -ENODEV; 7303 } 7304 7305 sas_address = device->sas_address; 7306 7307 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7308 7309 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 7310 } 7311 7312 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 7313 struct device_attribute *attr, char *buffer) 7314 { 7315 struct pqi_ctrl_info *ctrl_info; 7316 struct scsi_device *sdev; 7317 struct pqi_scsi_dev *device; 7318 unsigned long flags; 7319 7320 sdev = to_scsi_device(dev); 7321 ctrl_info = shost_to_hba(sdev->host); 7322 7323 if (pqi_ctrl_offline(ctrl_info)) 7324 return -ENODEV; 7325 7326 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7327 7328 device = sdev->hostdata; 7329 if (!device) { 7330 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7331 return -ENODEV; 7332 } 7333 7334 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 7335 buffer[1] = '\n'; 7336 buffer[2] = '\0'; 7337 7338 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7339 7340 return 2; 7341 } 7342 7343 static ssize_t pqi_raid_level_show(struct device *dev, 7344 struct device_attribute *attr, char *buffer) 7345 { 7346 struct pqi_ctrl_info *ctrl_info; 7347 struct scsi_device *sdev; 7348 struct pqi_scsi_dev *device; 7349 unsigned long flags; 7350 char *raid_level; 7351 7352 sdev = to_scsi_device(dev); 7353 ctrl_info = shost_to_hba(sdev->host); 7354 7355 if (pqi_ctrl_offline(ctrl_info)) 7356 return -ENODEV; 7357 7358 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7359 7360 device = sdev->hostdata; 7361 if (!device) { 7362 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7363 return -ENODEV; 7364 } 7365 7366 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 7367 raid_level = pqi_raid_level_to_string(device->raid_level); 7368 else 7369 raid_level = "N/A"; 7370 7371 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7372 7373 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 7374 } 7375 7376 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, 7377 struct device_attribute *attr, char *buffer) 7378 { 7379 struct pqi_ctrl_info *ctrl_info; 7380 struct scsi_device *sdev; 7381 struct pqi_scsi_dev *device; 7382 unsigned long flags; 7383 u64 raid_bypass_cnt; 7384 int cpu; 7385 7386 sdev = to_scsi_device(dev); 7387 ctrl_info = shost_to_hba(sdev->host); 7388 7389 if (pqi_ctrl_offline(ctrl_info)) 7390 return -ENODEV; 7391 7392 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7393 7394 device = sdev->hostdata; 7395 if (!device) { 7396 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7397 return -ENODEV; 7398 } 7399 7400 raid_bypass_cnt = 0; 7401 7402 if (device->raid_io_stats) { 7403 for_each_online_cpu(cpu) { 7404 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; 7405 } 7406 } 7407 7408 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7409 7410 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt); 7411 } 7412 7413 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, 7414 struct device_attribute *attr, char *buf) 7415 { 7416 struct pqi_ctrl_info *ctrl_info; 7417 struct scsi_device *sdev; 7418 struct pqi_scsi_dev *device; 7419 unsigned long flags; 7420 int output_len = 0; 7421 7422 sdev = to_scsi_device(dev); 7423 ctrl_info = shost_to_hba(sdev->host); 7424 7425 if (pqi_ctrl_offline(ctrl_info)) 7426 return -ENODEV; 7427 7428 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7429 7430 device = sdev->hostdata; 7431 if (!device) { 7432 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7433 return -ENODEV; 7434 } 7435 7436 output_len = snprintf(buf, PAGE_SIZE, "%d\n", 7437 device->ncq_prio_enable); 7438 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7439 7440 return output_len; 7441 } 7442 7443 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, 7444 struct device_attribute *attr, 7445 const char *buf, size_t count) 7446 { 7447 struct pqi_ctrl_info *ctrl_info; 7448 struct scsi_device *sdev; 7449 struct pqi_scsi_dev *device; 7450 unsigned long flags; 7451 u8 ncq_prio_enable = 0; 7452 7453 if (kstrtou8(buf, 0, &ncq_prio_enable)) 7454 return -EINVAL; 7455 7456 sdev = to_scsi_device(dev); 7457 ctrl_info = shost_to_hba(sdev->host); 7458 7459 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7460 7461 device = 
sdev->hostdata; 7462 7463 if (!device) { 7464 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7465 return -ENODEV; 7466 } 7467 7468 if (!device->ncq_prio_support) { 7469 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7470 return -EINVAL; 7471 } 7472 7473 device->ncq_prio_enable = ncq_prio_enable; 7474 7475 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7476 7477 return strlen(buf); 7478 } 7479 7480 static ssize_t pqi_numa_node_show(struct device *dev, 7481 struct device_attribute *attr, char *buffer) 7482 { 7483 struct scsi_device *sdev; 7484 struct pqi_ctrl_info *ctrl_info; 7485 7486 sdev = to_scsi_device(dev); 7487 ctrl_info = shost_to_hba(sdev->host); 7488 7489 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); 7490 } 7491 7492 static ssize_t pqi_write_stream_cnt_show(struct device *dev, 7493 struct device_attribute *attr, char *buffer) 7494 { 7495 struct pqi_ctrl_info *ctrl_info; 7496 struct scsi_device *sdev; 7497 struct pqi_scsi_dev *device; 7498 unsigned long flags; 7499 u64 write_stream_cnt; 7500 int cpu; 7501 7502 sdev = to_scsi_device(dev); 7503 ctrl_info = shost_to_hba(sdev->host); 7504 7505 if (pqi_ctrl_offline(ctrl_info)) 7506 return -ENODEV; 7507 7508 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7509 7510 device = sdev->hostdata; 7511 if (!device) { 7512 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7513 return -ENODEV; 7514 } 7515 7516 write_stream_cnt = 0; 7517 7518 if (device->raid_io_stats) { 7519 for_each_online_cpu(cpu) { 7520 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; 7521 } 7522 } 7523 7524 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7525 7526 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt); 7527 } 7528 7529 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 7530 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 7531 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 7532 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 7533 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); 7534 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 7535 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); 7536 static DEVICE_ATTR(sas_ncq_prio_enable, 0644, 7537 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); 7538 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); 7539 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL); 7540 7541 static struct attribute *pqi_sdev_attrs[] = { 7542 &dev_attr_lunid.attr, 7543 &dev_attr_unique_id.attr, 7544 &dev_attr_path_info.attr, 7545 &dev_attr_sas_address.attr, 7546 &dev_attr_ssd_smart_path_enabled.attr, 7547 &dev_attr_raid_level.attr, 7548 &dev_attr_raid_bypass_cnt.attr, 7549 &dev_attr_sas_ncq_prio_enable.attr, 7550 &dev_attr_numa_node.attr, 7551 &dev_attr_write_stream_cnt.attr, 7552 NULL 7553 }; 7554 7555 ATTRIBUTE_GROUPS(pqi_sdev); 7556 7557 static const struct scsi_host_template pqi_driver_template = { 7558 .module = THIS_MODULE, 7559 .name = DRIVER_NAME_SHORT, 7560 .proc_name = DRIVER_NAME_SHORT, 7561 .queuecommand = pqi_scsi_queue_command, 7562 .scan_start = pqi_scan_start, 7563 .scan_finished = pqi_scan_finished, 7564 .this_id = -1, 7565 .eh_device_reset_handler = pqi_eh_device_reset_handler, 7566 .eh_abort_handler = pqi_eh_abort_handler, 7567 .ioctl = pqi_ioctl, 7568 .sdev_init = pqi_sdev_init, 
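	/*
	 * Note (hedged summary, not from the original source): sdev_init,
	 * sdev_configure and sdev_destroy are, as commonly implemented in the
	 * SCSI midlayer, the per-scsi_device lifecycle hooks (device
	 * allocation, post-INQUIRY configuration, teardown). sdev_groups below
	 * publishes the pqi_sdev_attrs array defined above, so each LUN
	 * typically exposes lunid, raid_level, raid_bypass_cnt, etc. under its
	 * sysfs device directory (e.g. /sys/class/scsi_device/<h:c:t:l>/device/),
	 * and sas_ncq_prio_enable (0644) is writable but only accepted when the
	 * device reports ncq_prio_support. cmd_size reserves driver-private
	 * bytes in every scsi_cmnd, reachable via scsi_cmd_priv().
	 */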
7569 .sdev_configure = pqi_sdev_configure, 7570 .sdev_destroy = pqi_sdev_destroy, 7571 .map_queues = pqi_map_queues, 7572 .sdev_groups = pqi_sdev_groups, 7573 .shost_groups = pqi_shost_groups, 7574 .cmd_size = sizeof(struct pqi_cmd_priv), 7575 }; 7576 7577 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 7578 { 7579 int rc; 7580 struct Scsi_Host *shost; 7581 7582 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 7583 if (!shost) { 7584 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); 7585 return -ENOMEM; 7586 } 7587 7588 shost->io_port = 0; 7589 shost->n_io_port = 0; 7590 shost->this_id = -1; 7591 shost->max_channel = PQI_MAX_BUS; 7592 shost->max_cmd_len = MAX_COMMAND_SIZE; 7593 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; 7594 shost->max_id = ~0; 7595 shost->max_sectors = ctrl_info->max_sectors; 7596 shost->can_queue = ctrl_info->scsi_ml_can_queue; 7597 shost->cmd_per_lun = shost->can_queue; 7598 shost->sg_tablesize = ctrl_info->sg_tablesize; 7599 shost->transportt = pqi_sas_transport_template; 7600 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 7601 shost->unique_id = shost->irq; 7602 shost->nr_hw_queues = ctrl_info->num_queue_groups; 7603 shost->host_tagset = 1; 7604 shost->hostdata[0] = (unsigned long)ctrl_info; 7605 7606 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 7607 if (rc) { 7608 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); 7609 goto free_host; 7610 } 7611 7612 rc = pqi_add_sas_host(shost, ctrl_info); 7613 if (rc) { 7614 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); 7615 goto remove_host; 7616 } 7617 7618 ctrl_info->scsi_host = shost; 7619 7620 return 0; 7621 7622 remove_host: 7623 scsi_remove_host(shost); 7624 free_host: 7625 scsi_host_put(shost); 7626 7627 return rc; 7628 } 7629 7630 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 7631 { 7632 struct Scsi_Host *shost; 7633 7634 pqi_delete_sas_host(ctrl_info); 7635 7636 shost = ctrl_info->scsi_host; 7637 if (!shost) 7638 return; 7639 7640 scsi_remove_host(shost); 7641 scsi_host_put(shost); 7642 } 7643 7644 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 7645 { 7646 int rc = 0; 7647 struct pqi_device_registers __iomem *pqi_registers; 7648 unsigned long timeout; 7649 unsigned int timeout_msecs; 7650 union pqi_reset_register reset_reg; 7651 7652 pqi_registers = ctrl_info->pqi_registers; 7653 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 7654 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 7655 7656 while (1) { 7657 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 7658 reset_reg.all_bits = readl(&pqi_registers->device_reset); 7659 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 7660 break; 7661 if (!sis_is_firmware_running(ctrl_info)) { 7662 rc = -ENXIO; 7663 break; 7664 } 7665 if (time_after(jiffies, timeout)) { 7666 rc = -ETIMEDOUT; 7667 break; 7668 } 7669 } 7670 7671 return rc; 7672 } 7673 7674 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 7675 { 7676 int rc; 7677 union pqi_reset_register reset_reg; 7678 7679 if (ctrl_info->pqi_reset_quiesce_supported) { 7680 rc = sis_pqi_reset_quiesce(ctrl_info); 7681 if (rc) { 7682 dev_err(&ctrl_info->pci_dev->dev, 7683 "PQI reset failed during quiesce with error %d\n", rc); 7684 return rc; 7685 } 7686 } 7687 7688 reset_reg.all_bits = 0; 7689 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 7690 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 7691 7692 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 
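	/*
	 * The hard-reset request has just been written to the PQI device_reset
	 * register. pqi_wait_for_pqi_reset_completion() (defined above) now
	 * polls that register every PQI_RESET_POLL_INTERVAL_MSECS until
	 * reset_action reads back PQI_RESET_ACTION_COMPLETED, giving up early
	 * if the SIS firmware stops running or once the controller-advertised
	 * max_reset_timeout (converted above as units of 100 ms) expires.
	 */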
7693 7694 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 7695 if (rc) 7696 dev_err(&ctrl_info->pci_dev->dev, 7697 "PQI reset failed with error %d\n", rc); 7698 7699 return rc; 7700 } 7701 7702 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 7703 { 7704 int rc; 7705 struct bmic_sense_subsystem_info *sense_info; 7706 7707 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); 7708 if (!sense_info) 7709 return -ENOMEM; 7710 7711 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 7712 if (rc) 7713 goto out; 7714 7715 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 7716 sizeof(sense_info->ctrl_serial_number)); 7717 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 7718 7719 out: 7720 kfree(sense_info); 7721 7722 return rc; 7723 } 7724 7725 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 7726 { 7727 int rc; 7728 struct bmic_identify_controller *identify; 7729 7730 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 7731 if (!identify) 7732 return -ENOMEM; 7733 7734 rc = pqi_identify_controller(ctrl_info, identify); 7735 if (rc) 7736 goto out; 7737 7738 if (get_unaligned_le32(&identify->extra_controller_flags) & 7739 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { 7740 memcpy(ctrl_info->firmware_version, 7741 identify->firmware_version_long, 7742 sizeof(identify->firmware_version_long)); 7743 } else { 7744 memcpy(ctrl_info->firmware_version, 7745 identify->firmware_version_short, 7746 sizeof(identify->firmware_version_short)); 7747 ctrl_info->firmware_version 7748 [sizeof(identify->firmware_version_short)] = '\0'; 7749 snprintf(ctrl_info->firmware_version + 7750 strlen(ctrl_info->firmware_version), 7751 sizeof(ctrl_info->firmware_version) - 7752 sizeof(identify->firmware_version_short), 7753 "-%u", 7754 get_unaligned_le16(&identify->firmware_build_number)); 7755 } 7756 7757 memcpy(ctrl_info->model, identify->product_id, 7758 sizeof(identify->product_id)); 7759 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 7760 7761 memcpy(ctrl_info->vendor, identify->vendor_id, 7762 sizeof(identify->vendor_id)); 7763 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 7764 7765 dev_info(&ctrl_info->pci_dev->dev, 7766 "Firmware version: %s\n", ctrl_info->firmware_version); 7767 7768 out: 7769 kfree(identify); 7770 7771 return rc; 7772 } 7773 7774 struct pqi_config_table_section_info { 7775 struct pqi_ctrl_info *ctrl_info; 7776 void *section; 7777 u32 section_offset; 7778 void __iomem *section_iomem_addr; 7779 }; 7780 7781 static inline bool pqi_is_firmware_feature_supported( 7782 struct pqi_config_table_firmware_features *firmware_features, 7783 unsigned int bit_position) 7784 { 7785 unsigned int byte_index; 7786 7787 byte_index = bit_position / BITS_PER_BYTE; 7788 7789 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 7790 return false; 7791 7792 return firmware_features->features_supported[byte_index] & 7793 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 7794 } 7795 7796 static inline bool pqi_is_firmware_feature_enabled( 7797 struct pqi_config_table_firmware_features *firmware_features, 7798 void __iomem *firmware_features_iomem_addr, 7799 unsigned int bit_position) 7800 { 7801 unsigned int byte_index; 7802 u8 __iomem *features_enabled_iomem_addr; 7803 7804 byte_index = (bit_position / BITS_PER_BYTE) + 7805 (le16_to_cpu(firmware_features->num_elements) * 2); 7806 7807 features_enabled_iomem_addr = firmware_features_iomem_addr + 7808 offsetof(struct pqi_config_table_firmware_features, 7809 features_supported) + byte_index; 7810 7811 return *((__force u8 *)features_enabled_iomem_addr) & 7812 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 7813 } 7814 7815 static inline void pqi_request_firmware_feature( 7816 struct pqi_config_table_firmware_features *firmware_features, 7817 unsigned int bit_position) 7818 { 7819 unsigned int byte_index; 7820 7821 byte_index = (bit_position / BITS_PER_BYTE) + 7822 le16_to_cpu(firmware_features->num_elements); 7823 7824 firmware_features->features_supported[byte_index] |= 7825 (1 << (bit_position % BITS_PER_BYTE)); 7826 } 7827 7828 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 7829 u16 first_section, u16 last_section) 7830 { 7831 struct pqi_vendor_general_request request; 7832 7833 memset(&request, 0, sizeof(request)); 7834 7835 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 7836 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 7837 &request.header.iu_length); 7838 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 7839 &request.function_code); 7840 put_unaligned_le16(first_section, 7841 &request.data.config_table_update.first_section); 7842 put_unaligned_le16(last_section, 7843 &request.data.config_table_update.last_section); 7844 7845 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 7846 } 7847 7848 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 7849 struct pqi_config_table_firmware_features *firmware_features, 7850 void __iomem *firmware_features_iomem_addr) 7851 { 7852 void *features_requested; 7853 void __iomem *features_requested_iomem_addr; 7854 void __iomem *host_max_known_feature_iomem_addr; 7855 7856 features_requested = firmware_features->features_supported + 7857 le16_to_cpu(firmware_features->num_elements); 7858 7859 features_requested_iomem_addr = firmware_features_iomem_addr + 7860 (features_requested - (void *)firmware_features); 7861 7862 memcpy_toio(features_requested_iomem_addr, features_requested, 7863 le16_to_cpu(firmware_features->num_elements)); 7864 7865 if (pqi_is_firmware_feature_supported(firmware_features, 7866 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { 7867 host_max_known_feature_iomem_addr = 7868 features_requested_iomem_addr + 7869 (le16_to_cpu(firmware_features->num_elements) * 2) + 7870 sizeof(__le16); 7871 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); 7872 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); 7873 } 7874 7875 return pqi_config_table_update(ctrl_info, 7876 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 7877 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 7878 } 7879 7880 struct pqi_firmware_feature { 7881 char *feature_name; 7882 unsigned int feature_bit; 7883 bool supported; 7884 bool enabled; 7885 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 7886 struct pqi_firmware_feature *firmware_feature); 7887 }; 7888 7889 static void 
pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 7890 struct pqi_firmware_feature *firmware_feature) 7891 { 7892 if (!firmware_feature->supported) { 7893 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 7894 firmware_feature->feature_name); 7895 return; 7896 } 7897 7898 if (firmware_feature->enabled) { 7899 dev_info(&ctrl_info->pci_dev->dev, 7900 "%s enabled\n", firmware_feature->feature_name); 7901 return; 7902 } 7903 7904 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 7905 firmware_feature->feature_name); 7906 } 7907 7908 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 7909 struct pqi_firmware_feature *firmware_feature) 7910 { 7911 switch (firmware_feature->feature_bit) { 7912 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: 7913 ctrl_info->enable_r1_writes = firmware_feature->enabled; 7914 break; 7915 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: 7916 ctrl_info->enable_r5_writes = firmware_feature->enabled; 7917 break; 7918 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: 7919 ctrl_info->enable_r6_writes = firmware_feature->enabled; 7920 break; 7921 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 7922 ctrl_info->soft_reset_handshake_supported = 7923 firmware_feature->enabled && 7924 pqi_read_soft_reset_status(ctrl_info); 7925 break; 7926 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 7927 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; 7928 break; 7929 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 7930 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; 7931 break; 7932 case PQI_FIRMWARE_FEATURE_FW_TRIAGE: 7933 ctrl_info->firmware_triage_supported = firmware_feature->enabled; 7934 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); 7935 break; 7936 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: 7937 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; 7938 break; 7939 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: 7940 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; 7941 break; 7942 case PQI_FIRMWARE_FEATURE_CTRL_LOGGING: 7943 ctrl_info->ctrl_logging_supported = firmware_feature->enabled; 7944 break; 7945 } 7946 7947 pqi_firmware_feature_status(ctrl_info, firmware_feature); 7948 } 7949 7950 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 7951 struct pqi_firmware_feature *firmware_feature) 7952 { 7953 if (firmware_feature->feature_status) 7954 firmware_feature->feature_status(ctrl_info, firmware_feature); 7955 } 7956 7957 static DEFINE_MUTEX(pqi_firmware_features_mutex); 7958 7959 static struct pqi_firmware_feature pqi_firmware_features[] = { 7960 { 7961 .feature_name = "Online Firmware Activation", 7962 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 7963 .feature_status = pqi_firmware_feature_status, 7964 }, 7965 { 7966 .feature_name = "Serial Management Protocol", 7967 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 7968 .feature_status = pqi_firmware_feature_status, 7969 }, 7970 { 7971 .feature_name = "Maximum Known Feature", 7972 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, 7973 .feature_status = pqi_firmware_feature_status, 7974 }, 7975 { 7976 .feature_name = "RAID 0 Read Bypass", 7977 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, 7978 .feature_status = pqi_firmware_feature_status, 7979 }, 7980 { 7981 .feature_name = "RAID 1 Read Bypass", 7982 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, 7983 .feature_status = pqi_firmware_feature_status, 7984 }, 7985 { 7986 .feature_name = "RAID 
5 Read Bypass", 7987 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, 7988 .feature_status = pqi_firmware_feature_status, 7989 }, 7990 { 7991 .feature_name = "RAID 6 Read Bypass", 7992 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, 7993 .feature_status = pqi_firmware_feature_status, 7994 }, 7995 { 7996 .feature_name = "RAID 0 Write Bypass", 7997 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, 7998 .feature_status = pqi_firmware_feature_status, 7999 }, 8000 { 8001 .feature_name = "RAID 1 Write Bypass", 8002 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, 8003 .feature_status = pqi_ctrl_update_feature_flags, 8004 }, 8005 { 8006 .feature_name = "RAID 5 Write Bypass", 8007 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, 8008 .feature_status = pqi_ctrl_update_feature_flags, 8009 }, 8010 { 8011 .feature_name = "RAID 6 Write Bypass", 8012 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, 8013 .feature_status = pqi_ctrl_update_feature_flags, 8014 }, 8015 { 8016 .feature_name = "New Soft Reset Handshake", 8017 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 8018 .feature_status = pqi_ctrl_update_feature_flags, 8019 }, 8020 { 8021 .feature_name = "RAID IU Timeout", 8022 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 8023 .feature_status = pqi_ctrl_update_feature_flags, 8024 }, 8025 { 8026 .feature_name = "TMF IU Timeout", 8027 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 8028 .feature_status = pqi_ctrl_update_feature_flags, 8029 }, 8030 { 8031 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", 8032 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, 8033 .feature_status = pqi_firmware_feature_status, 8034 }, 8035 { 8036 .feature_name = "Firmware Triage", 8037 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE, 8038 .feature_status = pqi_ctrl_update_feature_flags, 8039 }, 8040 { 8041 .feature_name = "RPL Extended Formats 4 and 5", 8042 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, 8043 .feature_status = pqi_ctrl_update_feature_flags, 8044 }, 8045 { 8046 .feature_name = "Multi-LUN Target", 8047 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, 8048 .feature_status = pqi_ctrl_update_feature_flags, 8049 }, 8050 { 8051 .feature_name = "Controller Data Logging", 8052 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING, 8053 .feature_status = pqi_ctrl_update_feature_flags, 8054 }, 8055 }; 8056 8057 static void pqi_process_firmware_features( 8058 struct pqi_config_table_section_info *section_info) 8059 { 8060 int rc; 8061 struct pqi_ctrl_info *ctrl_info; 8062 struct pqi_config_table_firmware_features *firmware_features; 8063 void __iomem *firmware_features_iomem_addr; 8064 unsigned int i; 8065 unsigned int num_features_supported; 8066 8067 ctrl_info = section_info->ctrl_info; 8068 firmware_features = section_info->section; 8069 firmware_features_iomem_addr = section_info->section_iomem_addr; 8070 8071 for (i = 0, num_features_supported = 0; 8072 i < ARRAY_SIZE(pqi_firmware_features); i++) { 8073 if (pqi_is_firmware_feature_supported(firmware_features, 8074 pqi_firmware_features[i].feature_bit)) { 8075 pqi_firmware_features[i].supported = true; 8076 num_features_supported++; 8077 } else { 8078 pqi_firmware_feature_update(ctrl_info, 8079 &pqi_firmware_features[i]); 8080 } 8081 } 8082 8083 if (num_features_supported == 0) 8084 return; 8085 8086 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8087 if (!pqi_firmware_features[i].supported) 8088 continue; 8089 
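		/*
		 * As far as the helpers above show, the feature negotiation area
		 * consists of three back-to-back byte arrays of num_elements
		 * bytes each: the supported bytes (tested here), the
		 * host-requested bytes at offset num_elements (set by
		 * pqi_request_firmware_feature()), and the firmware-enabled
		 * bytes at offset 2 * num_elements (read by
		 * pqi_is_firmware_feature_enabled()). Illustrative example: with
		 * num_elements == 8, feature bit 13 maps to byte 13 / 8 = 1 with
		 * mask 1 << (13 % 8) = 0x20; the host requests it by setting
		 * byte 1 + 8 = 9 and reads its enabled state from byte
		 * 1 + 16 = 17.
		 */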
pqi_request_firmware_feature(firmware_features, 8090 pqi_firmware_features[i].feature_bit); 8091 } 8092 8093 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 8094 firmware_features_iomem_addr); 8095 if (rc) { 8096 dev_err(&ctrl_info->pci_dev->dev, 8097 "failed to enable firmware features in PQI configuration table\n"); 8098 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8099 if (!pqi_firmware_features[i].supported) 8100 continue; 8101 pqi_firmware_feature_update(ctrl_info, 8102 &pqi_firmware_features[i]); 8103 } 8104 return; 8105 } 8106 8107 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8108 if (!pqi_firmware_features[i].supported) 8109 continue; 8110 if (pqi_is_firmware_feature_enabled(firmware_features, 8111 firmware_features_iomem_addr, 8112 pqi_firmware_features[i].feature_bit)) { 8113 pqi_firmware_features[i].enabled = true; 8114 } 8115 pqi_firmware_feature_update(ctrl_info, 8116 &pqi_firmware_features[i]); 8117 } 8118 } 8119 8120 static void pqi_init_firmware_features(void) 8121 { 8122 unsigned int i; 8123 8124 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8125 pqi_firmware_features[i].supported = false; 8126 pqi_firmware_features[i].enabled = false; 8127 } 8128 } 8129 8130 static void pqi_process_firmware_features_section( 8131 struct pqi_config_table_section_info *section_info) 8132 { 8133 mutex_lock(&pqi_firmware_features_mutex); 8134 pqi_init_firmware_features(); 8135 pqi_process_firmware_features(section_info); 8136 mutex_unlock(&pqi_firmware_features_mutex); 8137 } 8138 8139 /* 8140 * Reset all controller settings that can be initialized during the processing 8141 * of the PQI Configuration Table. 8142 */ 8143 8144 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) 8145 { 8146 ctrl_info->heartbeat_counter = NULL; 8147 ctrl_info->soft_reset_status = NULL; 8148 ctrl_info->soft_reset_handshake_supported = false; 8149 ctrl_info->enable_r1_writes = false; 8150 ctrl_info->enable_r5_writes = false; 8151 ctrl_info->enable_r6_writes = false; 8152 ctrl_info->raid_iu_timeout_supported = false; 8153 ctrl_info->tmf_iu_timeout_supported = false; 8154 ctrl_info->firmware_triage_supported = false; 8155 ctrl_info->rpl_extended_format_4_5_supported = false; 8156 ctrl_info->multi_lun_device_supported = false; 8157 ctrl_info->ctrl_logging_supported = false; 8158 } 8159 8160 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 8161 { 8162 u32 table_length; 8163 u32 section_offset; 8164 bool firmware_feature_section_present; 8165 void __iomem *table_iomem_addr; 8166 struct pqi_config_table *config_table; 8167 struct pqi_config_table_section_header *section; 8168 struct pqi_config_table_section_info section_info; 8169 struct pqi_config_table_section_info feature_section_info = {0}; 8170 8171 table_length = ctrl_info->config_table_length; 8172 if (table_length == 0) 8173 return 0; 8174 8175 config_table = kmalloc(table_length, GFP_KERNEL); 8176 if (!config_table) { 8177 dev_err(&ctrl_info->pci_dev->dev, 8178 "failed to allocate memory for PQI configuration table\n"); 8179 return -ENOMEM; 8180 } 8181 8182 /* 8183 * Copy the config table contents from I/O memory space into the 8184 * temporary buffer. 
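 * The shadow copy lets the section walk below use ordinary memory reads;
 * note that the heartbeat counter and soft reset status pointers recorded
 * while walking the sections still reference the live table in BAR space
 * (table_iomem_addr), not this temporary copy.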
8185 */ 8186 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; 8187 memcpy_fromio(config_table, table_iomem_addr, table_length); 8188 8189 firmware_feature_section_present = false; 8190 section_info.ctrl_info = ctrl_info; 8191 section_offset = get_unaligned_le32(&config_table->first_section_offset); 8192 8193 while (section_offset) { 8194 section = (void *)config_table + section_offset; 8195 8196 section_info.section = section; 8197 section_info.section_offset = section_offset; 8198 section_info.section_iomem_addr = table_iomem_addr + section_offset; 8199 8200 switch (get_unaligned_le16(&section->section_id)) { 8201 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 8202 firmware_feature_section_present = true; 8203 feature_section_info = section_info; 8204 break; 8205 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 8206 if (pqi_disable_heartbeat) 8207 dev_warn(&ctrl_info->pci_dev->dev, 8208 "heartbeat disabled by module parameter\n"); 8209 else 8210 ctrl_info->heartbeat_counter = 8211 table_iomem_addr + 8212 section_offset + 8213 offsetof(struct pqi_config_table_heartbeat, 8214 heartbeat_counter); 8215 break; 8216 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 8217 ctrl_info->soft_reset_status = 8218 table_iomem_addr + 8219 section_offset + 8220 offsetof(struct pqi_config_table_soft_reset, 8221 soft_reset_status); 8222 break; 8223 } 8224 8225 section_offset = get_unaligned_le16(&section->next_section_offset); 8226 } 8227 8228 /* 8229 * We process the firmware feature section after all other sections 8230 * have been processed so that the feature bit callbacks can take 8231 * into account the settings configured by other sections. 8232 */ 8233 if (firmware_feature_section_present) 8234 pqi_process_firmware_features_section(&feature_section_info); 8235 8236 kfree(config_table); 8237 8238 return 0; 8239 } 8240 8241 /* Switches the controller from PQI mode back into SIS mode. */ 8242 8243 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 8244 { 8245 int rc; 8246 8247 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 8248 rc = pqi_reset(ctrl_info); 8249 if (rc) 8250 return rc; 8251 rc = sis_reenable_sis_mode(ctrl_info); 8252 if (rc) { 8253 dev_err(&ctrl_info->pci_dev->dev, 8254 "re-enabling SIS mode failed with error %d\n", rc); 8255 return rc; 8256 } 8257 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8258 8259 return 0; 8260 } 8261 8262 /* 8263 * If the controller isn't already in SIS mode, this function forces it into 8264 * SIS mode.
8265 */ 8266 8267 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 8268 { 8269 if (!sis_is_firmware_running(ctrl_info)) 8270 return -ENXIO; 8271 8272 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 8273 return 0; 8274 8275 if (sis_is_kernel_up(ctrl_info)) { 8276 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8277 return 0; 8278 } 8279 8280 return pqi_revert_to_sis_mode(ctrl_info); 8281 } 8282 8283 static void pqi_perform_lockup_action(void) 8284 { 8285 switch (pqi_lockup_action) { 8286 case PANIC: 8287 panic("FATAL: Smart Family Controller lockup detected"); 8288 break; 8289 case REBOOT: 8290 emergency_restart(); 8291 break; 8292 case NONE: 8293 default: 8294 break; 8295 } 8296 } 8297 8298 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024) 8299 #define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS) 8300 8301 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 8302 { 8303 int rc; 8304 u32 product_id; 8305 8306 if (reset_devices) { 8307 if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) { 8308 rc = sis_wait_for_fw_triage_completion(ctrl_info); 8309 if (rc) 8310 return rc; 8311 } 8312 if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) { 8313 sis_notify_kdump(ctrl_info); 8314 rc = sis_wait_for_ctrl_logging_completion(ctrl_info); 8315 if (rc) 8316 return rc; 8317 } 8318 sis_soft_reset(ctrl_info); 8319 ssleep(PQI_POST_RESET_DELAY_SECS); 8320 } else { 8321 rc = pqi_force_sis_mode(ctrl_info); 8322 if (rc) 8323 return rc; 8324 } 8325 8326 /* 8327 * Wait until the controller is ready to start accepting SIS 8328 * commands. 8329 */ 8330 rc = sis_wait_for_ctrl_ready(ctrl_info); 8331 if (rc) { 8332 if (reset_devices) { 8333 dev_err(&ctrl_info->pci_dev->dev, 8334 "kdump init failed with error %d\n", rc); 8335 pqi_lockup_action = REBOOT; 8336 pqi_perform_lockup_action(); 8337 } 8338 return rc; 8339 } 8340 8341 /* 8342 * Get the controller properties. This allows us to determine 8343 * whether or not it supports PQI mode. 8344 */ 8345 rc = sis_get_ctrl_properties(ctrl_info); 8346 if (rc) { 8347 dev_err(&ctrl_info->pci_dev->dev, 8348 "error obtaining controller properties\n"); 8349 return rc; 8350 } 8351 8352 rc = sis_get_pqi_capabilities(ctrl_info); 8353 if (rc) { 8354 dev_err(&ctrl_info->pci_dev->dev, 8355 "error obtaining controller capabilities\n"); 8356 return rc; 8357 } 8358 8359 product_id = sis_get_product_id(ctrl_info); 8360 ctrl_info->product_id = (u8)product_id; 8361 ctrl_info->product_revision = (u8)(product_id >> 8); 8362 8363 if (is_kdump_kernel()) { 8364 if (ctrl_info->max_outstanding_requests > 8365 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 8366 ctrl_info->max_outstanding_requests = 8367 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 8368 } else { 8369 if (ctrl_info->max_outstanding_requests > 8370 PQI_MAX_OUTSTANDING_REQUESTS) 8371 ctrl_info->max_outstanding_requests = 8372 PQI_MAX_OUTSTANDING_REQUESTS; 8373 } 8374 8375 pqi_calculate_io_resources(ctrl_info); 8376 8377 rc = pqi_alloc_error_buffer(ctrl_info); 8378 if (rc) { 8379 dev_err(&ctrl_info->pci_dev->dev, 8380 "failed to allocate PQI error buffer\n"); 8381 return rc; 8382 } 8383 8384 /* 8385 * If the function we are about to call succeeds, the 8386 * controller will transition from legacy SIS mode 8387 * into PQI mode. 8388 */ 8389 rc = sis_init_base_struct_addr(ctrl_info); 8390 if (rc) { 8391 dev_err(&ctrl_info->pci_dev->dev, 8392 "error initializing PQI mode\n"); 8393 return rc; 8394 } 8395 8396 /* Wait for the controller to complete the SIS -> PQI transition. 
*/ 8397 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8398 if (rc) { 8399 dev_err(&ctrl_info->pci_dev->dev, 8400 "transition to PQI mode failed\n"); 8401 return rc; 8402 } 8403 8404 /* From here on, we are running in PQI mode. */ 8405 ctrl_info->pqi_mode_enabled = true; 8406 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8407 8408 rc = pqi_alloc_admin_queues(ctrl_info); 8409 if (rc) { 8410 dev_err(&ctrl_info->pci_dev->dev, 8411 "failed to allocate admin queues\n"); 8412 return rc; 8413 } 8414 8415 rc = pqi_create_admin_queues(ctrl_info); 8416 if (rc) { 8417 dev_err(&ctrl_info->pci_dev->dev, 8418 "error creating admin queues\n"); 8419 return rc; 8420 } 8421 8422 rc = pqi_report_device_capability(ctrl_info); 8423 if (rc) { 8424 dev_err(&ctrl_info->pci_dev->dev, 8425 "obtaining device capability failed\n"); 8426 return rc; 8427 } 8428 8429 rc = pqi_validate_device_capability(ctrl_info); 8430 if (rc) 8431 return rc; 8432 8433 pqi_calculate_queue_resources(ctrl_info); 8434 8435 rc = pqi_enable_msix_interrupts(ctrl_info); 8436 if (rc) 8437 return rc; 8438 8439 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 8440 ctrl_info->max_msix_vectors = 8441 ctrl_info->num_msix_vectors_enabled; 8442 pqi_calculate_queue_resources(ctrl_info); 8443 } 8444 8445 rc = pqi_alloc_io_resources(ctrl_info); 8446 if (rc) 8447 return rc; 8448 8449 rc = pqi_alloc_operational_queues(ctrl_info); 8450 if (rc) { 8451 dev_err(&ctrl_info->pci_dev->dev, 8452 "failed to allocate operational queues\n"); 8453 return rc; 8454 } 8455 8456 pqi_init_operational_queues(ctrl_info); 8457 8458 rc = pqi_create_queues(ctrl_info); 8459 if (rc) 8460 return rc; 8461 8462 rc = pqi_request_irqs(ctrl_info); 8463 if (rc) 8464 return rc; 8465 8466 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8467 8468 ctrl_info->controller_online = true; 8469 8470 rc = pqi_process_config_table(ctrl_info); 8471 if (rc) 8472 return rc; 8473 8474 pqi_start_heartbeat_timer(ctrl_info); 8475 8476 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8477 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8478 if (rc) { /* Supported features not returned correctly. */ 8479 dev_err(&ctrl_info->pci_dev->dev, 8480 "error obtaining advanced RAID bypass configuration\n"); 8481 return rc; 8482 } 8483 ctrl_info->ciss_report_log_flags |= 8484 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8485 } 8486 8487 rc = pqi_enable_events(ctrl_info); 8488 if (rc) { 8489 dev_err(&ctrl_info->pci_dev->dev, 8490 "error enabling events\n"); 8491 return rc; 8492 } 8493 8494 /* Register with the SCSI subsystem. 
*/ 8495 rc = pqi_register_scsi(ctrl_info); 8496 if (rc) 8497 return rc; 8498 8499 if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) { 8500 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); 8501 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8502 } 8503 8504 rc = pqi_get_ctrl_product_details(ctrl_info); 8505 if (rc) { 8506 dev_err(&ctrl_info->pci_dev->dev, 8507 "error obtaining product details\n"); 8508 return rc; 8509 } 8510 8511 rc = pqi_get_ctrl_serial_number(ctrl_info); 8512 if (rc) { 8513 dev_err(&ctrl_info->pci_dev->dev, 8514 "error obtaining ctrl serial number\n"); 8515 return rc; 8516 } 8517 8518 rc = pqi_set_diag_rescan(ctrl_info); 8519 if (rc) { 8520 dev_err(&ctrl_info->pci_dev->dev, 8521 "error enabling multi-lun rescan\n"); 8522 return rc; 8523 } 8524 8525 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8526 if (rc) { 8527 dev_err(&ctrl_info->pci_dev->dev, 8528 "error updating host wellness\n"); 8529 return rc; 8530 } 8531 8532 pqi_schedule_update_time_worker(ctrl_info); 8533 8534 pqi_scan_scsi_devices(ctrl_info); 8535 8536 return 0; 8537 } 8538 8539 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 8540 { 8541 unsigned int i; 8542 struct pqi_admin_queues *admin_queues; 8543 struct pqi_event_queue *event_queue; 8544 8545 admin_queues = &ctrl_info->admin_queues; 8546 admin_queues->iq_pi_copy = 0; 8547 admin_queues->oq_ci_copy = 0; 8548 writel(0, admin_queues->oq_pi); 8549 8550 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 8551 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 8552 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 8553 ctrl_info->queue_groups[i].oq_ci_copy = 0; 8554 8555 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 8556 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 8557 writel(0, ctrl_info->queue_groups[i].oq_pi); 8558 } 8559 8560 event_queue = &ctrl_info->event_queue; 8561 writel(0, event_queue->oq_pi); 8562 event_queue->oq_ci_copy = 0; 8563 } 8564 8565 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 8566 { 8567 int rc; 8568 8569 rc = pqi_force_sis_mode(ctrl_info); 8570 if (rc) 8571 return rc; 8572 8573 /* 8574 * Wait until the controller is ready to start accepting SIS 8575 * commands. 8576 */ 8577 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 8578 if (rc) 8579 return rc; 8580 8581 /* 8582 * Get the controller properties. This allows us to determine 8583 * whether or not it supports PQI mode. 8584 */ 8585 rc = sis_get_ctrl_properties(ctrl_info); 8586 if (rc) { 8587 dev_err(&ctrl_info->pci_dev->dev, 8588 "error obtaining controller properties\n"); 8589 return rc; 8590 } 8591 8592 rc = sis_get_pqi_capabilities(ctrl_info); 8593 if (rc) { 8594 dev_err(&ctrl_info->pci_dev->dev, 8595 "error obtaining controller capabilities\n"); 8596 return rc; 8597 } 8598 8599 /* 8600 * If the function we are about to call succeeds, the 8601 * controller will transition from legacy SIS mode 8602 * into PQI mode. 8603 */ 8604 rc = sis_init_base_struct_addr(ctrl_info); 8605 if (rc) { 8606 dev_err(&ctrl_info->pci_dev->dev, 8607 "error initializing PQI mode\n"); 8608 return rc; 8609 } 8610 8611 /* Wait for the controller to complete the SIS -> PQI transition. */ 8612 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8613 if (rc) { 8614 dev_err(&ctrl_info->pci_dev->dev, 8615 "transition to PQI mode failed\n"); 8616 return rc; 8617 } 8618 8619 /* From here on, we are running in PQI mode. 
*/ 8620 ctrl_info->pqi_mode_enabled = true; 8621 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8622 8623 pqi_reinit_queues(ctrl_info); 8624 8625 rc = pqi_create_admin_queues(ctrl_info); 8626 if (rc) { 8627 dev_err(&ctrl_info->pci_dev->dev, 8628 "error creating admin queues\n"); 8629 return rc; 8630 } 8631 8632 rc = pqi_create_queues(ctrl_info); 8633 if (rc) 8634 return rc; 8635 8636 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8637 8638 ctrl_info->controller_online = true; 8639 pqi_ctrl_unblock_requests(ctrl_info); 8640 8641 pqi_ctrl_reset_config(ctrl_info); 8642 8643 rc = pqi_process_config_table(ctrl_info); 8644 if (rc) 8645 return rc; 8646 8647 pqi_start_heartbeat_timer(ctrl_info); 8648 8649 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8650 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8651 if (rc) { 8652 dev_err(&ctrl_info->pci_dev->dev, 8653 "error obtaining advanced RAID bypass configuration\n"); 8654 return rc; 8655 } 8656 ctrl_info->ciss_report_log_flags |= 8657 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8658 } 8659 8660 rc = pqi_enable_events(ctrl_info); 8661 if (rc) { 8662 dev_err(&ctrl_info->pci_dev->dev, 8663 "error enabling events\n"); 8664 return rc; 8665 } 8666 8667 rc = pqi_get_ctrl_product_details(ctrl_info); 8668 if (rc) { 8669 dev_err(&ctrl_info->pci_dev->dev, 8670 "error obtaining product details\n"); 8671 return rc; 8672 } 8673 8674 rc = pqi_set_diag_rescan(ctrl_info); 8675 if (rc) { 8676 dev_err(&ctrl_info->pci_dev->dev, 8677 "error enabling multi-lun rescan\n"); 8678 return rc; 8679 } 8680 8681 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8682 if (rc) { 8683 dev_err(&ctrl_info->pci_dev->dev, 8684 "error updating host wellness\n"); 8685 return rc; 8686 } 8687 8688 if (pqi_ofa_in_progress(ctrl_info)) { 8689 pqi_ctrl_unblock_scan(ctrl_info); 8690 if (ctrl_info->ctrl_logging_supported) { 8691 if (!ctrl_info->ctrl_log_memory.host_memory) 8692 pqi_host_setup_buffer(ctrl_info, 8693 &ctrl_info->ctrl_log_memory, 8694 PQI_CTRL_LOG_TOTAL_SIZE, 8695 PQI_CTRL_LOG_MIN_SIZE); 8696 pqi_host_memory_update(ctrl_info, 8697 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8698 } else { 8699 if (ctrl_info->ctrl_log_memory.host_memory) 8700 pqi_host_free_buffer(ctrl_info, 8701 &ctrl_info->ctrl_log_memory); 8702 } 8703 } 8704 8705 pqi_scan_scsi_devices(ctrl_info); 8706 8707 return 0; 8708 } 8709 8710 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) 8711 { 8712 int rc; 8713 8714 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 8715 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 8716 8717 return pcibios_err_to_errno(rc); 8718 } 8719 8720 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 8721 { 8722 int rc; 8723 u64 mask; 8724 8725 rc = pci_enable_device(ctrl_info->pci_dev); 8726 if (rc) { 8727 dev_err(&ctrl_info->pci_dev->dev, 8728 "failed to enable PCI device\n"); 8729 return rc; 8730 } 8731 8732 if (sizeof(dma_addr_t) > 4) 8733 mask = DMA_BIT_MASK(64); 8734 else 8735 mask = DMA_BIT_MASK(32); 8736 8737 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 8738 if (rc) { 8739 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 8740 goto disable_device; 8741 } 8742 8743 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 8744 if (rc) { 8745 dev_err(&ctrl_info->pci_dev->dev, 8746 "failed to obtain PCI resources\n"); 8747 goto disable_device; 8748 } 8749 8750 ctrl_info->iomem_base = ioremap(pci_resource_start( 8751 ctrl_info->pci_dev, 0), 8752 
pci_resource_len(ctrl_info->pci_dev, 0)); 8753 if (!ctrl_info->iomem_base) { 8754 dev_err(&ctrl_info->pci_dev->dev, 8755 "failed to map memory for controller registers\n"); 8756 rc = -ENOMEM; 8757 goto release_regions; 8758 } 8759 8760 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 8761 8762 /* Increase the PCIe completion timeout. */ 8763 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 8764 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 8765 if (rc) { 8766 dev_err(&ctrl_info->pci_dev->dev, 8767 "failed to set PCIe completion timeout\n"); 8768 goto release_regions; 8769 } 8770 8771 /* Enable bus mastering. */ 8772 pci_set_master(ctrl_info->pci_dev); 8773 8774 ctrl_info->registers = ctrl_info->iomem_base; 8775 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 8776 8777 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 8778 8779 return 0; 8780 8781 release_regions: 8782 pci_release_regions(ctrl_info->pci_dev); 8783 disable_device: 8784 pci_disable_device(ctrl_info->pci_dev); 8785 8786 return rc; 8787 } 8788 8789 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 8790 { 8791 iounmap(ctrl_info->iomem_base); 8792 pci_release_regions(ctrl_info->pci_dev); 8793 if (pci_is_enabled(ctrl_info->pci_dev)) 8794 pci_disable_device(ctrl_info->pci_dev); 8795 pci_set_drvdata(ctrl_info->pci_dev, NULL); 8796 } 8797 8798 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 8799 { 8800 struct pqi_ctrl_info *ctrl_info; 8801 8802 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 8803 GFP_KERNEL, numa_node); 8804 if (!ctrl_info) 8805 return NULL; 8806 8807 mutex_init(&ctrl_info->scan_mutex); 8808 mutex_init(&ctrl_info->lun_reset_mutex); 8809 mutex_init(&ctrl_info->ofa_mutex); 8810 8811 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 8812 spin_lock_init(&ctrl_info->scsi_device_list_lock); 8813 8814 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 8815 atomic_set(&ctrl_info->num_interrupts, 0); 8816 8817 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 8818 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 8819 8820 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 8821 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 8822 8823 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); 8824 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); 8825 8826 sema_init(&ctrl_info->sync_request_sem, 8827 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 8828 init_waitqueue_head(&ctrl_info->block_requests_wait); 8829 8830 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 8831 ctrl_info->irq_mode = IRQ_MODE_NONE; 8832 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 8833 8834 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; 8835 ctrl_info->max_transfer_encrypted_sas_sata = 8836 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; 8837 ctrl_info->max_transfer_encrypted_nvme = 8838 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; 8839 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; 8840 ctrl_info->max_write_raid_1_10_2drive = ~0; 8841 ctrl_info->max_write_raid_1_10_3drive = ~0; 8842 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; 8843 8844 return ctrl_info; 8845 } 8846 8847 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 8848 { 8849 kfree(ctrl_info); 8850 } 8851 8852 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 8853 { 8854 pqi_free_irqs(ctrl_info); 8855 
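	/*
	 * Teardown mirrors setup in reverse order: the per-vector interrupt
	 * handlers are released first, then the MSI-X vectors themselves are
	 * freed below.
	 */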
pqi_disable_msix_interrupts(ctrl_info); 8856 } 8857 8858 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 8859 { 8860 pqi_free_interrupts(ctrl_info); 8861 if (ctrl_info->queue_memory_base) 8862 dma_free_coherent(&ctrl_info->pci_dev->dev, 8863 ctrl_info->queue_memory_length, 8864 ctrl_info->queue_memory_base, 8865 ctrl_info->queue_memory_base_dma_handle); 8866 if (ctrl_info->admin_queue_memory_base) 8867 dma_free_coherent(&ctrl_info->pci_dev->dev, 8868 ctrl_info->admin_queue_memory_length, 8869 ctrl_info->admin_queue_memory_base, 8870 ctrl_info->admin_queue_memory_base_dma_handle); 8871 pqi_free_all_io_requests(ctrl_info); 8872 if (ctrl_info->error_buffer) 8873 dma_free_coherent(&ctrl_info->pci_dev->dev, 8874 ctrl_info->error_buffer_length, 8875 ctrl_info->error_buffer, 8876 ctrl_info->error_buffer_dma_handle); 8877 if (ctrl_info->iomem_base) 8878 pqi_cleanup_pci_init(ctrl_info); 8879 pqi_free_ctrl_info(ctrl_info); 8880 } 8881 8882 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 8883 { 8884 ctrl_info->controller_online = false; 8885 pqi_stop_heartbeat_timer(ctrl_info); 8886 pqi_ctrl_block_requests(ctrl_info); 8887 pqi_cancel_rescan_worker(ctrl_info); 8888 pqi_cancel_update_time_worker(ctrl_info); 8889 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { 8890 pqi_fail_all_outstanding_requests(ctrl_info); 8891 ctrl_info->pqi_mode_enabled = false; 8892 } 8893 pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); 8894 pqi_unregister_scsi(ctrl_info); 8895 if (ctrl_info->pqi_mode_enabled) 8896 pqi_revert_to_sis_mode(ctrl_info); 8897 pqi_free_ctrl_resources(ctrl_info); 8898 } 8899 8900 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 8901 { 8902 pqi_ctrl_block_scan(ctrl_info); 8903 pqi_scsi_block_requests(ctrl_info); 8904 pqi_ctrl_block_device_reset(ctrl_info); 8905 pqi_ctrl_block_requests(ctrl_info); 8906 pqi_ctrl_wait_until_quiesced(ctrl_info); 8907 pqi_stop_heartbeat_timer(ctrl_info); 8908 } 8909 8910 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 8911 { 8912 pqi_start_heartbeat_timer(ctrl_info); 8913 pqi_ctrl_unblock_requests(ctrl_info); 8914 pqi_ctrl_unblock_device_reset(ctrl_info); 8915 pqi_scsi_unblock_requests(ctrl_info); 8916 pqi_ctrl_unblock_scan(ctrl_info); 8917 } 8918 8919 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) 8920 { 8921 ssleep(delay_secs); 8922 8923 return pqi_ctrl_init_resume(ctrl_info); 8924 } 8925 8926 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info, 8927 struct pqi_host_memory_descriptor *host_memory_descriptor, 8928 u32 total_size, u32 chunk_size) 8929 { 8930 int i; 8931 u32 sg_count; 8932 struct device *dev; 8933 struct pqi_host_memory *host_memory; 8934 struct pqi_sg_descriptor *mem_descriptor; 8935 dma_addr_t dma_handle; 8936 8937 sg_count = DIV_ROUND_UP(total_size, chunk_size); 8938 if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) 8939 goto out; 8940 8941 host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); 8942 if (!host_memory_descriptor->host_chunk_virt_address) 8943 goto out; 8944 8945 dev = &ctrl_info->pci_dev->dev; 8946 host_memory = host_memory_descriptor->host_memory; 8947 8948 for (i = 0; i < sg_count; i++) { 8949 host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); 8950 if (!host_memory_descriptor->host_chunk_virt_address[i]) 8951 goto out_free_chunks; 8952 mem_descriptor = 
&host_memory->sg_descriptor[i]; 8953 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); 8954 put_unaligned_le32(chunk_size, &mem_descriptor->length); 8955 } 8956 8957 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 8958 put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); 8959 put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); 8960 8961 return 0; 8962 8963 out_free_chunks: 8964 while (--i >= 0) { 8965 mem_descriptor = &host_memory->sg_descriptor[i]; 8966 dma_free_coherent(dev, chunk_size, 8967 host_memory_descriptor->host_chunk_virt_address[i], 8968 get_unaligned_le64(&mem_descriptor->address)); 8969 } 8970 kfree(host_memory_descriptor->host_chunk_virt_address); 8971 out: 8972 return -ENOMEM; 8973 } 8974 8975 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info, 8976 struct pqi_host_memory_descriptor *host_memory_descriptor, 8977 u32 total_required_size, u32 min_required_size) 8978 { 8979 u32 chunk_size; 8980 u32 min_chunk_size; 8981 8982 if (total_required_size == 0 || min_required_size == 0) 8983 return 0; 8984 8985 total_required_size = PAGE_ALIGN(total_required_size); 8986 min_required_size = PAGE_ALIGN(min_required_size); 8987 min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS); 8988 min_chunk_size = PAGE_ALIGN(min_chunk_size); 8989 8990 while (total_required_size >= min_required_size) { 8991 for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) { 8992 if (pqi_host_alloc_mem(ctrl_info, 8993 host_memory_descriptor, total_required_size, 8994 chunk_size) == 0) 8995 return 0; 8996 chunk_size /= 2; 8997 chunk_size = PAGE_ALIGN(chunk_size); 8998 } 8999 total_required_size /= 2; 9000 total_required_size = PAGE_ALIGN(total_required_size); 9001 } 9002 9003 return -ENOMEM; 9004 } 9005 9006 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, 9007 struct pqi_host_memory_descriptor *host_memory_descriptor, 9008 u32 total_size, u32 min_size) 9009 { 9010 struct device *dev; 9011 struct pqi_host_memory *host_memory; 9012 9013 dev = &ctrl_info->pci_dev->dev; 9014 9015 host_memory = dma_alloc_coherent(dev, sizeof(*host_memory), 9016 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL); 9017 if (!host_memory) 9018 return; 9019 9020 host_memory_descriptor->host_memory = host_memory; 9021 9022 if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor, 9023 total_size, min_size) < 0) { 9024 dev_err(dev, "failed to allocate firmware usable host buffer\n"); 9025 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9026 host_memory_descriptor->host_memory_dma_handle); 9027 host_memory_descriptor->host_memory = NULL; 9028 return; 9029 } 9030 } 9031 9032 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, 9033 struct pqi_host_memory_descriptor *host_memory_descriptor) 9034 { 9035 unsigned int i; 9036 struct device *dev; 9037 struct pqi_host_memory *host_memory; 9038 struct pqi_sg_descriptor *mem_descriptor; 9039 unsigned int num_memory_descriptors; 9040 9041 host_memory = host_memory_descriptor->host_memory; 9042 if (!host_memory) 9043 return; 9044 9045 dev = &ctrl_info->pci_dev->dev; 9046 9047 if (get_unaligned_le32(&host_memory->bytes_allocated) == 0) 9048 goto out; 9049 9050 mem_descriptor = host_memory->sg_descriptor; 9051 num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors); 9052 9053 for (i = 0; i < num_memory_descriptors; i++) { 9054 dma_free_coherent(dev, 9055 get_unaligned_le32(&mem_descriptor[i].length), 9056 
host_memory_descriptor->host_chunk_virt_address[i], 9057 get_unaligned_le64(&mem_descriptor[i].address)); 9058 } 9059 kfree(host_memory_descriptor->host_chunk_virt_address); 9060 9061 out: 9062 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9063 host_memory_descriptor->host_memory_dma_handle); 9064 host_memory_descriptor->host_memory = NULL; 9065 } 9066 9067 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, 9068 struct pqi_host_memory_descriptor *host_memory_descriptor, 9069 u16 function_code) 9070 { 9071 u32 buffer_length; 9072 struct pqi_vendor_general_request request; 9073 struct pqi_host_memory *host_memory; 9074 9075 memset(&request, 0, sizeof(request)); 9076 9077 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 9078 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 9079 put_unaligned_le16(function_code, &request.function_code); 9080 9081 host_memory = host_memory_descriptor->host_memory; 9082 9083 if (host_memory) { 9084 buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); 9085 put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address); 9086 put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length); 9087 9088 if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) { 9089 put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version); 9090 memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature)); 9091 } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) { 9092 put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version); 9093 memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature)); 9094 } 9095 } 9096 9097 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 9098 } 9099 9100 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 9101 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 9102 .status = SAM_STAT_CHECK_CONDITION, 9103 }; 9104 9105 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 9106 { 9107 unsigned int i; 9108 struct pqi_io_request *io_request; 9109 struct scsi_cmnd *scmd; 9110 struct scsi_device *sdev; 9111 9112 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9113 io_request = &ctrl_info->io_request_pool[i]; 9114 if (atomic_read(&io_request->refcount) == 0) 9115 continue; 9116 9117 scmd = io_request->scmd; 9118 if (scmd) { 9119 sdev = scmd->device; 9120 if (!sdev || !scsi_device_online(sdev)) { 9121 pqi_free_io_request(io_request); 9122 continue; 9123 } else { 9124 set_host_byte(scmd, DID_NO_CONNECT); 9125 } 9126 } else { 9127 io_request->status = -ENXIO; 9128 io_request->error_info = 9129 &pqi_ctrl_offline_raid_error_info; 9130 } 9131 9132 io_request->io_complete_callback(io_request, 9133 io_request->context); 9134 } 9135 } 9136 9137 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 9138 { 9139 pqi_perform_lockup_action(); 9140 pqi_stop_heartbeat_timer(ctrl_info); 9141 pqi_free_interrupts(ctrl_info); 9142 pqi_cancel_rescan_worker(ctrl_info); 9143 pqi_cancel_update_time_worker(ctrl_info); 9144 pqi_ctrl_wait_until_quiesced(ctrl_info); 9145 pqi_fail_all_outstanding_requests(ctrl_info); 9146 pqi_ctrl_unblock_requests(ctrl_info); 9147 pqi_take_ctrl_devices_offline(ctrl_info); 9148 } 9149 9150 static void 
pqi_ctrl_offline_worker(struct work_struct *work) 9151 { 9152 struct pqi_ctrl_info *ctrl_info; 9153 9154 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 9155 pqi_take_ctrl_offline_deferred(ctrl_info); 9156 } 9157 9158 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9159 { 9160 char *string; 9161 9162 switch (ctrl_shutdown_reason) { 9163 case PQI_IQ_NOT_DRAINED_TIMEOUT: 9164 string = "inbound queue not drained timeout"; 9165 break; 9166 case PQI_LUN_RESET_TIMEOUT: 9167 string = "LUN reset timeout"; 9168 break; 9169 case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT: 9170 string = "I/O pending timeout after LUN reset"; 9171 break; 9172 case PQI_NO_HEARTBEAT: 9173 string = "no controller heartbeat detected"; 9174 break; 9175 case PQI_FIRMWARE_KERNEL_NOT_UP: 9176 string = "firmware kernel not ready"; 9177 break; 9178 case PQI_OFA_RESPONSE_TIMEOUT: 9179 string = "OFA response timeout"; 9180 break; 9181 case PQI_INVALID_REQ_ID: 9182 string = "invalid request ID"; 9183 break; 9184 case PQI_UNMATCHED_REQ_ID: 9185 string = "unmatched request ID"; 9186 break; 9187 case PQI_IO_PI_OUT_OF_RANGE: 9188 string = "I/O queue producer index out of range"; 9189 break; 9190 case PQI_EVENT_PI_OUT_OF_RANGE: 9191 string = "event queue producer index out of range"; 9192 break; 9193 case PQI_UNEXPECTED_IU_TYPE: 9194 string = "unexpected IU type"; 9195 break; 9196 default: 9197 string = "unknown reason"; 9198 break; 9199 } 9200 9201 return string; 9202 } 9203 9204 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 9205 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9206 { 9207 if (!ctrl_info->controller_online) 9208 return; 9209 9210 ctrl_info->controller_online = false; 9211 ctrl_info->pqi_mode_enabled = false; 9212 pqi_ctrl_block_requests(ctrl_info); 9213 if (!pqi_disable_ctrl_shutdown) 9214 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); 9215 pci_disable_device(ctrl_info->pci_dev); 9216 dev_err(&ctrl_info->pci_dev->dev, 9217 "controller offline: reason code 0x%x (%s)\n", 9218 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason)); 9219 schedule_work(&ctrl_info->ctrl_offline_work); 9220 } 9221 9222 static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info) 9223 { 9224 int rc; 9225 unsigned long flags; 9226 struct pqi_scsi_dev *device; 9227 9228 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 9229 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 9230 rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list); 9231 if (rc) 9232 continue; 9233 9234 /* 9235 * Is the sdev pointer NULL? 
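 * A device on the controller's list may not have an attached midlayer
 * scsi_device (for example, if it has not been exposed to or attached by
 * the SCSI midlayer), so the state change is applied only when the sdev
 * pointer is valid.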
9236 */ 9237 if (device->sdev) 9238 scsi_device_set_state(device->sdev, SDEV_OFFLINE); 9239 } 9240 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 9241 } 9242 9243 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 9244 const struct pci_device_id *id) 9245 { 9246 char *ctrl_description; 9247 9248 if (id->driver_data) 9249 ctrl_description = (char *)id->driver_data; 9250 else 9251 ctrl_description = "Microchip Smart Family Controller"; 9252 9253 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 9254 } 9255 9256 static int pqi_pci_probe(struct pci_dev *pci_dev, 9257 const struct pci_device_id *id) 9258 { 9259 int rc; 9260 int node; 9261 struct pqi_ctrl_info *ctrl_info; 9262 9263 pqi_print_ctrl_info(pci_dev, id); 9264 9265 if (pqi_disable_device_id_wildcards && 9266 id->subvendor == PCI_ANY_ID && 9267 id->subdevice == PCI_ANY_ID) { 9268 dev_warn(&pci_dev->dev, 9269 "controller not probed because device ID wildcards are disabled\n"); 9270 return -ENODEV; 9271 } 9272 9273 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) 9274 dev_warn(&pci_dev->dev, 9275 "controller device ID matched using wildcards\n"); 9276 9277 node = dev_to_node(&pci_dev->dev); 9278 if (node == NUMA_NO_NODE) { 9279 node = cpu_to_node(0); 9280 if (node == NUMA_NO_NODE) 9281 node = 0; 9282 set_dev_node(&pci_dev->dev, node); 9283 } 9284 9285 ctrl_info = pqi_alloc_ctrl_info(node); 9286 if (!ctrl_info) { 9287 dev_err(&pci_dev->dev, 9288 "failed to allocate controller info block\n"); 9289 return -ENOMEM; 9290 } 9291 ctrl_info->numa_node = node; 9292 9293 ctrl_info->pci_dev = pci_dev; 9294 9295 rc = pqi_pci_init(ctrl_info); 9296 if (rc) 9297 goto error; 9298 9299 rc = pqi_ctrl_init(ctrl_info); 9300 if (rc) 9301 goto error; 9302 9303 return 0; 9304 9305 error: 9306 pqi_remove_ctrl(ctrl_info); 9307 9308 return rc; 9309 } 9310 9311 static void pqi_pci_remove(struct pci_dev *pci_dev) 9312 { 9313 struct pqi_ctrl_info *ctrl_info; 9314 u16 vendor_id; 9315 int rc; 9316 9317 ctrl_info = pci_get_drvdata(pci_dev); 9318 if (!ctrl_info) 9319 return; 9320 9321 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); 9322 if (vendor_id == 0xffff) 9323 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; 9324 else 9325 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; 9326 9327 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { 9328 rc = pqi_flush_cache(ctrl_info, RESTART); 9329 if (rc) 9330 dev_err(&pci_dev->dev, 9331 "unable to flush controller cache during remove\n"); 9332 } 9333 9334 pqi_remove_ctrl(ctrl_info); 9335 } 9336 9337 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) 9338 { 9339 unsigned int i; 9340 struct pqi_io_request *io_request; 9341 struct scsi_cmnd *scmd; 9342 9343 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9344 io_request = &ctrl_info->io_request_pool[i]; 9345 if (atomic_read(&io_request->refcount) == 0) 9346 continue; 9347 scmd = io_request->scmd; 9348 WARN_ON(scmd != NULL); /* IO command from SML */ 9349 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ 9350 } 9351 } 9352 9353 static void pqi_shutdown(struct pci_dev *pci_dev) 9354 { 9355 int rc; 9356 struct pqi_ctrl_info *ctrl_info; 9357 enum bmic_flush_cache_shutdown_event shutdown_event; 9358 9359 ctrl_info = pci_get_drvdata(pci_dev); 9360 if (!ctrl_info) { 9361 dev_err(&pci_dev->dev, 9362 "cache could not be flushed\n"); 9363 return; 9364 } 9365 9366 pqi_wait_until_ofa_finished(ctrl_info); 9367 9368 pqi_scsi_block_requests(ctrl_info); 
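	/*
	 * Shutdown quiesce: OFA has finished and new SCSI requests are
	 * blocked; the calls below also block device resets and further
	 * controller requests and wait for the controller to quiesce, so the
	 * BMIC flush-cache request issued afterwards (RESTART vs. SHUTDOWN,
	 * chosen from system_state) sees a quiet controller.
	 */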
9369 pqi_ctrl_block_device_reset(ctrl_info); 9370 pqi_ctrl_block_requests(ctrl_info); 9371 pqi_ctrl_wait_until_quiesced(ctrl_info); 9372 9373 if (system_state == SYSTEM_RESTART) 9374 shutdown_event = RESTART; 9375 else 9376 shutdown_event = SHUTDOWN; 9377 9378 /* 9379 * Write all data in the controller's battery-backed cache to 9380 * storage. 9381 */ 9382 rc = pqi_flush_cache(ctrl_info, shutdown_event); 9383 if (rc) 9384 dev_err(&pci_dev->dev, 9385 "unable to flush controller cache during shutdown\n"); 9386 9387 pqi_crash_if_pending_command(ctrl_info); 9388 pqi_reset(ctrl_info); 9389 } 9390 9391 static void pqi_process_lockup_action_param(void) 9392 { 9393 unsigned int i; 9394 9395 if (!pqi_lockup_action_param) 9396 return; 9397 9398 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 9399 if (strcmp(pqi_lockup_action_param, 9400 pqi_lockup_actions[i].name) == 0) { 9401 pqi_lockup_action = pqi_lockup_actions[i].action; 9402 return; 9403 } 9404 } 9405 9406 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 9407 DRIVER_NAME_SHORT, pqi_lockup_action_param); 9408 } 9409 9410 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30 9411 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60) 9412 9413 static void pqi_process_ctrl_ready_timeout_param(void) 9414 { 9415 if (pqi_ctrl_ready_timeout_secs == 0) 9416 return; 9417 9418 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) { 9419 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", 9420 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS); 9421 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS; 9422 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) { 9423 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", 9424 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS); 9425 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS; 9426 } 9427 9428 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs; 9429 } 9430 9431 static void pqi_process_module_params(void) 9432 { 9433 pqi_process_lockup_action_param(); 9434 pqi_process_ctrl_ready_timeout_param(); 9435 } 9436 9437 #if defined(CONFIG_PM) 9438 9439 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev) 9440 { 9441 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) 9442 return RESTART; 9443 9444 return SUSPEND; 9445 } 9446 9447 static int pqi_suspend_or_freeze(struct device *dev, bool suspend) 9448 { 9449 struct pci_dev *pci_dev; 9450 struct pqi_ctrl_info *ctrl_info; 9451 9452 pci_dev = to_pci_dev(dev); 9453 ctrl_info = pci_get_drvdata(pci_dev); 9454 9455 pqi_wait_until_ofa_finished(ctrl_info); 9456 9457 pqi_ctrl_block_scan(ctrl_info); 9458 pqi_scsi_block_requests(ctrl_info); 9459 pqi_ctrl_block_device_reset(ctrl_info); 9460 pqi_ctrl_block_requests(ctrl_info); 9461 pqi_ctrl_wait_until_quiesced(ctrl_info); 9462 9463 if (suspend) { 9464 enum bmic_flush_cache_shutdown_event shutdown_event; 9465 9466 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); 9467 pqi_flush_cache(ctrl_info, shutdown_event); 9468 } 9469 9470 pqi_stop_heartbeat_timer(ctrl_info); 9471 
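	/*
	 * The controller is quiesced and the heartbeat timer is stopped, so
	 * no request should still be outstanding; warn if one is, then free
	 * the IRQs so they can be re-requested on resume/thaw.
	 */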
pqi_crash_if_pending_command(ctrl_info); 9472 pqi_free_irqs(ctrl_info); 9473 9474 ctrl_info->controller_online = false; 9475 ctrl_info->pqi_mode_enabled = false; 9476 9477 return 0; 9478 } 9479 9480 static __maybe_unused int pqi_suspend(struct device *dev) 9481 { 9482 return pqi_suspend_or_freeze(dev, true); 9483 } 9484 9485 static int pqi_resume_or_restore(struct device *dev) 9486 { 9487 int rc; 9488 struct pci_dev *pci_dev; 9489 struct pqi_ctrl_info *ctrl_info; 9490 9491 pci_dev = to_pci_dev(dev); 9492 ctrl_info = pci_get_drvdata(pci_dev); 9493 9494 rc = pqi_request_irqs(ctrl_info); 9495 if (rc) 9496 return rc; 9497 9498 pqi_ctrl_unblock_device_reset(ctrl_info); 9499 pqi_ctrl_unblock_requests(ctrl_info); 9500 pqi_scsi_unblock_requests(ctrl_info); 9501 pqi_ctrl_unblock_scan(ctrl_info); 9502 9503 ssleep(PQI_POST_RESET_DELAY_SECS); 9504 9505 return pqi_ctrl_init_resume(ctrl_info); 9506 } 9507 9508 static int pqi_freeze(struct device *dev) 9509 { 9510 return pqi_suspend_or_freeze(dev, false); 9511 } 9512 9513 static int pqi_thaw(struct device *dev) 9514 { 9515 int rc; 9516 struct pci_dev *pci_dev; 9517 struct pqi_ctrl_info *ctrl_info; 9518 9519 pci_dev = to_pci_dev(dev); 9520 ctrl_info = pci_get_drvdata(pci_dev); 9521 9522 rc = pqi_request_irqs(ctrl_info); 9523 if (rc) 9524 return rc; 9525 9526 ctrl_info->controller_online = true; 9527 ctrl_info->pqi_mode_enabled = true; 9528 9529 pqi_ctrl_unblock_device_reset(ctrl_info); 9530 pqi_ctrl_unblock_requests(ctrl_info); 9531 pqi_scsi_unblock_requests(ctrl_info); 9532 pqi_ctrl_unblock_scan(ctrl_info); 9533 9534 return 0; 9535 } 9536 9537 static int pqi_poweroff(struct device *dev) 9538 { 9539 struct pci_dev *pci_dev; 9540 struct pqi_ctrl_info *ctrl_info; 9541 enum bmic_flush_cache_shutdown_event shutdown_event; 9542 9543 pci_dev = to_pci_dev(dev); 9544 ctrl_info = pci_get_drvdata(pci_dev); 9545 9546 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); 9547 pqi_flush_cache(ctrl_info, shutdown_event); 9548 9549 return 0; 9550 } 9551 9552 static const struct dev_pm_ops pqi_pm_ops = { 9553 .suspend = pqi_suspend, 9554 .resume = pqi_resume_or_restore, 9555 .freeze = pqi_freeze, 9556 .thaw = pqi_thaw, 9557 .poweroff = pqi_poweroff, 9558 .restore = pqi_resume_or_restore, 9559 }; 9560 9561 #endif /* CONFIG_PM */ 9562 9563 /* Define the PCI IDs for the controllers that we support. 
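 * Entries are matched in table order; the trailing PCI_ANY_ID entry picks up
 * any other adapter that reports the 0x028f PQI device ID, subject to the
 * wildcard check in pqi_pci_probe() (see pqi_disable_device_id_wildcards).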
*/ 9564 static const struct pci_device_id pqi_pci_id_table[] = { 9565 { 9566 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9567 0x105b, 0x1211) 9568 }, 9569 { 9570 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9571 0x105b, 0x1321) 9572 }, 9573 { 9574 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9575 0x152d, 0x8a22) 9576 }, 9577 { 9578 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9579 0x152d, 0x8a23) 9580 }, 9581 { 9582 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9583 0x152d, 0x8a24) 9584 }, 9585 { 9586 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9587 0x152d, 0x8a36) 9588 }, 9589 { 9590 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9591 0x152d, 0x8a37) 9592 }, 9593 { 9594 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9595 0x193d, 0x0462) 9596 }, 9597 { 9598 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9599 0x193d, 0x1104) 9600 }, 9601 { 9602 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9603 0x193d, 0x1105) 9604 }, 9605 { 9606 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9607 0x193d, 0x1106) 9608 }, 9609 { 9610 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9611 0x193d, 0x1107) 9612 }, 9613 { 9614 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9615 0x193d, 0x1108) 9616 }, 9617 { 9618 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9619 0x193d, 0x1109) 9620 }, 9621 { 9622 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9623 0x193d, 0x110b) 9624 }, 9625 { 9626 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9627 0x193d, 0x1110) 9628 }, 9629 { 9630 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9631 0x193d, 0x8460) 9632 }, 9633 { 9634 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9635 0x193d, 0x8461) 9636 }, 9637 { 9638 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9639 0x193d, 0x8462) 9640 }, 9641 { 9642 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9643 0x193d, 0xc460) 9644 }, 9645 { 9646 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9647 0x193d, 0xc461) 9648 }, 9649 { 9650 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9651 0x193d, 0xf460) 9652 }, 9653 { 9654 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9655 0x193d, 0xf461) 9656 }, 9657 { 9658 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9659 0x1bd4, 0x0045) 9660 }, 9661 { 9662 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9663 0x1bd4, 0x0046) 9664 }, 9665 { 9666 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9667 0x1bd4, 0x0047) 9668 }, 9669 { 9670 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9671 0x1bd4, 0x0048) 9672 }, 9673 { 9674 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9675 0x1bd4, 0x004a) 9676 }, 9677 { 9678 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9679 0x1bd4, 0x004b) 9680 }, 9681 { 9682 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9683 0x1bd4, 0x004c) 9684 }, 9685 { 9686 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9687 0x1bd4, 0x004f) 9688 }, 9689 { 9690 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9691 0x1bd4, 0x0051) 9692 }, 9693 { 9694 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9695 0x1bd4, 0x0052) 9696 }, 9697 { 9698 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9699 0x1bd4, 0x0053) 9700 }, 9701 { 9702 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9703 0x1bd4, 0x0054) 9704 }, 9705 { 9706 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9707 0x1bd4, 0x006b) 9708 }, 9709 { 9710 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9711 0x1bd4, 0x006c) 9712 }, 9713 { 9714 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9715 0x1bd4, 0x006d) 9716 }, 9717 { 9718 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9719 0x1bd4, 0x006f) 9720 }, 9721 { 9722 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, 9723 0x1bd4, 0x0070) 9724 }, 9725 { 9726 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9727 0x1bd4, 0x0071) 9728 }, 9729 { 9730 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9731 0x1bd4, 0x0072) 9732 }, 9733 { 9734 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9735 0x1bd4, 0x0086) 9736 }, 9737 { 9738 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9739 0x1bd4, 0x0087) 9740 }, 9741 { 9742 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9743 0x1bd4, 0x0088) 9744 }, 9745 { 9746 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9747 0x1bd4, 0x0089) 9748 }, 9749 { 9750 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9751 0x1bd4, 0x00a3) 9752 }, 9753 { 9754 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9755 0x1ff9, 0x00a1) 9756 }, 9757 { 9758 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9759 0x1f3a, 0x0104) 9760 }, 9761 { 9762 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9763 0x19e5, 0xd227) 9764 }, 9765 { 9766 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9767 0x19e5, 0xd228) 9768 }, 9769 { 9770 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9771 0x19e5, 0xd229) 9772 }, 9773 { 9774 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9775 0x19e5, 0xd22a) 9776 }, 9777 { 9778 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9779 0x19e5, 0xd22b) 9780 }, 9781 { 9782 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9783 0x19e5, 0xd22c) 9784 }, 9785 { 9786 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9787 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 9788 }, 9789 { 9790 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9791 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 9792 }, 9793 { 9794 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9795 PCI_VENDOR_ID_ADAPTEC2, 0x0659) 9796 }, 9797 { 9798 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9799 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 9800 }, 9801 { 9802 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9803 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 9804 }, 9805 { 9806 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9807 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 9808 }, 9809 { 9810 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9811 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 9812 }, 9813 { 9814 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9815 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 9816 }, 9817 { 9818 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9819 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 9820 }, 9821 { 9822 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9823 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 9824 }, 9825 { 9826 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9827 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 9828 }, 9829 { 9830 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9831 PCI_VENDOR_ID_ADAPTEC2, 0x0808) 9832 }, 9833 { 9834 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9835 PCI_VENDOR_ID_ADAPTEC2, 0x0809) 9836 }, 9837 { 9838 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9839 PCI_VENDOR_ID_ADAPTEC2, 0x080a) 9840 }, 9841 { 9842 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9843 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 9844 }, 9845 { 9846 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9847 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 9848 }, 9849 { 9850 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9851 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 9852 }, 9853 { 9854 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9855 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 9856 }, 9857 { 9858 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9859 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 9860 }, 9861 { 9862 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9863 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 9864 }, 9865 { 9866 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9867 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 9868 }, 9869 { 9870 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9871 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 9872 }, 9873 { 9874 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9875 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 9876 }, 9877 { 9878 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9879 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 9880 }, 9881 { 9882 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9883 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 9884 }, 9885 { 9886 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9887 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 9888 }, 9889 { 9890 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9891 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 9892 }, 9893 { 9894 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9895 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 9896 }, 9897 { 9898 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9899 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 9900 }, 9901 { 9902 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9903 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 9904 }, 9905 { 9906 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9907 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 9908 }, 9909 { 9910 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9911 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 9912 }, 9913 { 9914 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9915 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 9916 }, 9917 { 9918 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9919 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 9920 }, 9921 { 9922 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9923 PCI_VENDOR_ID_ADAPTEC2, 0x1304) 9924 }, 9925 { 9926 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9927 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 9928 }, 9929 { 9930 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9931 PCI_VENDOR_ID_ADAPTEC2, 0x1400) 9932 }, 9933 { 9934 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9935 PCI_VENDOR_ID_ADAPTEC2, 0x1402) 9936 }, 9937 { 9938 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9939 PCI_VENDOR_ID_ADAPTEC2, 0x1410) 9940 }, 9941 { 9942 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9943 PCI_VENDOR_ID_ADAPTEC2, 0x1411) 9944 }, 9945 { 9946 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9947 PCI_VENDOR_ID_ADAPTEC2, 0x1412) 9948 }, 9949 { 9950 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9951 PCI_VENDOR_ID_ADAPTEC2, 0x1420) 9952 }, 9953 { 9954 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9955 PCI_VENDOR_ID_ADAPTEC2, 0x1430) 9956 }, 9957 { 9958 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9959 PCI_VENDOR_ID_ADAPTEC2, 0x1440) 9960 }, 9961 { 9962 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9963 PCI_VENDOR_ID_ADAPTEC2, 0x1441) 9964 }, 9965 { 9966 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9967 PCI_VENDOR_ID_ADAPTEC2, 0x1450) 9968 }, 9969 { 9970 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9971 PCI_VENDOR_ID_ADAPTEC2, 0x1452) 9972 }, 9973 { 9974 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9975 PCI_VENDOR_ID_ADAPTEC2, 0x1460) 9976 }, 9977 { 9978 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9979 PCI_VENDOR_ID_ADAPTEC2, 0x1461) 9980 }, 9981 { 9982 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9983 PCI_VENDOR_ID_ADAPTEC2, 0x1462) 9984 }, 9985 { 9986 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9987 PCI_VENDOR_ID_ADAPTEC2, 0x1463) 9988 }, 9989 { 9990 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9991 PCI_VENDOR_ID_ADAPTEC2, 0x1470) 9992 }, 9993 { 9994 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9995 PCI_VENDOR_ID_ADAPTEC2, 0x1471) 9996 }, 9997 { 9998 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9999 PCI_VENDOR_ID_ADAPTEC2, 0x1472) 10000 }, 10001 { 10002 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10003 PCI_VENDOR_ID_ADAPTEC2, 0x1473) 10004 }, 10005 { 10006 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10007 PCI_VENDOR_ID_ADAPTEC2, 0x1474) 10008 }, 10009 { 10010 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10011 PCI_VENDOR_ID_ADAPTEC2, 0x1475) 10012 }, 10013 { 10014 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10015 PCI_VENDOR_ID_ADAPTEC2, 0x1480) 10016 }, 10017 { 10018 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10019 PCI_VENDOR_ID_ADAPTEC2, 0x1490) 10020 }, 10021 { 10022 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10023 PCI_VENDOR_ID_ADAPTEC2, 0x1491) 10024 }, 10025 { 10026 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10027 PCI_VENDOR_ID_ADAPTEC2, 0x14a0) 10028 }, 10029 { 10030 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10031 PCI_VENDOR_ID_ADAPTEC2, 0x14a1) 10032 }, 10033 { 10034 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10035 PCI_VENDOR_ID_ADAPTEC2, 0x14a2) 10036 }, 10037 { 10038 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10039 PCI_VENDOR_ID_ADAPTEC2, 0x14a4) 10040 }, 10041 { 10042 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10043 PCI_VENDOR_ID_ADAPTEC2, 0x14a5) 10044 }, 10045 { 10046 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10047 PCI_VENDOR_ID_ADAPTEC2, 0x14a6) 10048 }, 10049 { 10050 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10051 PCI_VENDOR_ID_ADAPTEC2, 0x14b0) 10052 }, 10053 { 10054 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10055 PCI_VENDOR_ID_ADAPTEC2, 0x14b1) 10056 }, 10057 { 10058 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10059 PCI_VENDOR_ID_ADAPTEC2, 0x14c0) 10060 }, 10061 { 10062 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10063 PCI_VENDOR_ID_ADAPTEC2, 0x14c1) 10064 }, 10065 { 10066 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10067 PCI_VENDOR_ID_ADAPTEC2, 0x14c2) 10068 }, 10069 { 10070 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10071 PCI_VENDOR_ID_ADAPTEC2, 0x14c3) 10072 }, 10073 { 10074 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10075 PCI_VENDOR_ID_ADAPTEC2, 0x14c4) 10076 }, 10077 { 10078 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10079 PCI_VENDOR_ID_ADAPTEC2, 0x14d0) 10080 }, 10081 { 10082 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10083 PCI_VENDOR_ID_ADAPTEC2, 0x14e0) 10084 }, 10085 { 10086 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10087 PCI_VENDOR_ID_ADAPTEC2, 0x14f0) 10088 }, 10089 { 10090 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10091 0x207d, 0x4044) 10092 }, 10093 { 10094 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10095 0x207d, 0x4054) 10096 }, 10097 { 10098 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10099 0x207d, 0x4084) 10100 }, 10101 { 10102 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10103 0x207d, 0x4094) 10104 }, 10105 { 10106 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10107 0x207d, 0x4140) 10108 }, 10109 { 10110 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10111 0x207d, 0x4240) 10112 }, 10113 { 10114 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10115 PCI_VENDOR_ID_ADVANTECH, 0x8312) 10116 }, 10117 { 10118 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10119 PCI_VENDOR_ID_DELL, 0x1fe0) 10120 }, 10121 { 10122 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10123 PCI_VENDOR_ID_HP, 0x0600) 10124 }, 10125 { 10126 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10127 PCI_VENDOR_ID_HP, 0x0601) 10128 }, 10129 { 10130 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10131 PCI_VENDOR_ID_HP, 0x0602) 10132 }, 10133 { 10134 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10135 PCI_VENDOR_ID_HP, 0x0603) 10136 }, 10137 { 10138 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10139 PCI_VENDOR_ID_HP, 0x0609) 10140 }, 10141 { 10142 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10143 PCI_VENDOR_ID_HP, 0x0650) 10144 }, 10145 { 10146 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10147 PCI_VENDOR_ID_HP, 0x0651) 10148 }, 10149 { 10150 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10151 PCI_VENDOR_ID_HP, 0x0652) 10152 }, 10153 { 10154 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10155 PCI_VENDOR_ID_HP, 0x0653) 10156 }, 10157 { 10158 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10159 PCI_VENDOR_ID_HP, 0x0654) 10160 }, 10161 { 10162 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10163 PCI_VENDOR_ID_HP, 0x0655) 10164 }, 10165 { 10166 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10167 PCI_VENDOR_ID_HP, 0x0700) 10168 }, 10169 { 10170 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10171 PCI_VENDOR_ID_HP, 0x0701) 10172 }, 10173 { 10174 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10175 PCI_VENDOR_ID_HP, 0x1001) 10176 }, 10177 { 10178 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10179 PCI_VENDOR_ID_HP, 0x1002) 10180 }, 10181 { 10182 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10183 PCI_VENDOR_ID_HP, 0x1100) 10184 }, 10185 { 10186 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10187 PCI_VENDOR_ID_HP, 0x1101) 10188 }, 10189 { 10190 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10191 0x1590, 0x0294) 10192 }, 10193 { 10194 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10195 0x1590, 0x02db) 10196 }, 10197 { 10198 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10199 0x1590, 0x02dc) 10200 }, 10201 { 10202 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10203 0x1590, 0x032e) 10204 }, 10205 { 10206 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10207 0x1590, 0x036f) 10208 }, 10209 { 10210 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10211 0x1590, 0x0381) 10212 }, 10213 { 10214 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10215 0x1590, 0x0382) 10216 }, 10217 { 10218 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10219 0x1590, 0x0383) 10220 }, 10221 { 10222 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10223 0x1d8d, 0x0800) 10224 }, 10225 { 10226 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10227 0x1d8d, 0x0908) 10228 }, 10229 { 10230 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10231 0x1d8d, 0x0806) 10232 }, 10233 { 10234 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10235 0x1d8d, 0x0916) 10236 }, 10237 { 10238 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10239 PCI_VENDOR_ID_GIGABYTE, 0x1000) 10240 }, 10241 { 10242 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10243 0x1dfc, 0x3161) 10244 }, 10245 { 10246 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10247 0x1f0c, 0x3161) 10248 }, 10249 { 10250 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10251 0x1cf2, 0x0804) 10252 }, 10253 { 10254 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10255 0x1cf2, 0x0805) 10256 }, 10257 { 10258 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10259 0x1cf2, 0x0806) 10260 }, 10261 { 10262 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10263 0x1cf2, 0x5445) 10264 }, 10265 { 10266 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10267 0x1cf2, 0x5446) 10268 }, 10269 { 10270 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10271 0x1cf2, 0x5447) 10272 }, 10273 { 10274 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10275 0x1cf2, 0x5449) 10276 }, 10277 { 10278 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10279 0x1cf2, 0x544a) 10280 }, 10281 { 10282 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10283 0x1cf2, 0x544b) 10284 }, 10285 { 10286 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10287 0x1cf2, 0x544d) 10288 }, 10289 { 10290 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10291 0x1cf2, 0x544e) 10292 }, 10293 { 10294 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10295 0x1cf2, 0x544f) 10296 }, 10297 { 10298 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10299 0x1cf2, 0x54da) 10300 }, 10301 { 10302 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10303 0x1cf2, 0x54db) 10304 }, 10305 { 10306 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10307 0x1cf2, 0x54dc) 10308 }, 10309 { 10310 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10311 0x1cf2, 0x0b27) 10312 }, 10313 { 10314 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10315 0x1cf2, 0x0b29) 10316 }, 10317 { 10318 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10319 0x1cf2, 0x0b45) 10320 }, 10321 { 10322 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10323 0x1cc4, 0x0101) 10324 }, 10325 { 10326 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10327 0x1cc4, 0x0201) 10328 }, 10329 { 10330 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10331 0x1018, 0x8238) 10332 }, 10333 { 10334 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10335 0x1f3f, 0x0610) 10336 }, 10337 { 10338 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10339 PCI_VENDOR_ID_LENOVO, 0x0220) 10340 }, 10341 { 10342 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10343 PCI_VENDOR_ID_LENOVO, 0x0221) 10344 }, 10345 { 10346 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10347 PCI_VENDOR_ID_LENOVO, 0x0222) 10348 }, 10349 { 10350 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10351 PCI_VENDOR_ID_LENOVO, 0x0223) 10352 }, 10353 { 10354 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10355 PCI_VENDOR_ID_LENOVO, 0x0224) 10356 }, 10357 { 10358 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10359 PCI_VENDOR_ID_LENOVO, 0x0225) 10360 }, 10361 { 10362 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10363 PCI_VENDOR_ID_LENOVO, 0x0520) 10364 }, 10365 { 10366 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10367 PCI_VENDOR_ID_LENOVO, 0x0521) 10368 }, 10369 { 10370 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10371 PCI_VENDOR_ID_LENOVO, 0x0522) 10372 }, 10373 { 10374 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10375 PCI_VENDOR_ID_LENOVO, 0x0620) 10376 }, 10377 { 10378 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10379 PCI_VENDOR_ID_LENOVO, 0x0621) 10380 }, 10381 { 10382 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10383 PCI_VENDOR_ID_LENOVO, 0x0622) 10384 }, 10385 { 10386 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10387 PCI_VENDOR_ID_LENOVO, 0x0623) 10388 }, 10389 { 10390 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10391 PCI_VENDOR_ID_LENOVO, 0x0624) 10392 }, 10393 { 10394 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10395 PCI_VENDOR_ID_LENOVO, 0x0625) 10396 }, 10397 { 10398 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10399 PCI_VENDOR_ID_LENOVO, 0x0626) 10400 }, 10401 { 10402 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10403 PCI_VENDOR_ID_LENOVO, 0x0627) 10404 }, 10405 { 10406 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10407 PCI_VENDOR_ID_LENOVO, 0x0628) 10408 }, 10409 { 10410 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10411 0x1014, 0x0718) 10412 }, 10413 { 10414 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10415 0x1137, 0x02f8) 10416 }, 10417 { 10418 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10419 0x1137, 0x02f9) 10420 }, 10421 { 10422 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10423 0x1137, 0x02fa) 10424 }, 10425 { 10426 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10427 0x1137, 0x02fe) 10428 }, 10429 { 10430 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10431 0x1137, 0x02ff) 10432 }, 
10433 { 10434 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10435 0x1137, 0x0300) 10436 }, 10437 { 10438 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10439 0x1ded, 0x3301) 10440 }, 10441 { 10442 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10443 0x1ff9, 0x0045) 10444 }, 10445 { 10446 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10447 0x1ff9, 0x0046) 10448 }, 10449 { 10450 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10451 0x1ff9, 0x0047) 10452 }, 10453 { 10454 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10455 0x1ff9, 0x0048) 10456 }, 10457 { 10458 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10459 0x1ff9, 0x004a) 10460 }, 10461 { 10462 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10463 0x1ff9, 0x004b) 10464 }, 10465 { 10466 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10467 0x1ff9, 0x004c) 10468 }, 10469 { 10470 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10471 0x1ff9, 0x004f) 10472 }, 10473 { 10474 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10475 0x1ff9, 0x0051) 10476 }, 10477 { 10478 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10479 0x1ff9, 0x0052) 10480 }, 10481 { 10482 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10483 0x1ff9, 0x0053) 10484 }, 10485 { 10486 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10487 0x1ff9, 0x0054) 10488 }, 10489 { 10490 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10491 0x1ff9, 0x006b) 10492 }, 10493 { 10494 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10495 0x1ff9, 0x006c) 10496 }, 10497 { 10498 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10499 0x1ff9, 0x006d) 10500 }, 10501 { 10502 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10503 0x1ff9, 0x006f) 10504 }, 10505 { 10506 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10507 0x1ff9, 0x0070) 10508 }, 10509 { 10510 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10511 0x1ff9, 0x0071) 10512 }, 10513 { 10514 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10515 0x1ff9, 0x0072) 10516 }, 10517 { 10518 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10519 0x1ff9, 0x0086) 10520 }, 10521 { 10522 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10523 0x1ff9, 0x0087) 10524 }, 10525 { 10526 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10527 0x1ff9, 0x0088) 10528 }, 10529 { 10530 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10531 0x1ff9, 0x0089) 10532 }, 10533 { 10534 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10535 0x1e93, 0x1000) 10536 }, 10537 { 10538 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10539 0x1e93, 0x1001) 10540 }, 10541 { 10542 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10543 0x1e93, 0x1002) 10544 }, 10545 { 10546 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10547 0x1e93, 0x1005) 10548 }, 10549 { 10550 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10551 0x1f51, 0x1001) 10552 }, 10553 { 10554 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10555 0x1f51, 0x1002) 10556 }, 10557 { 10558 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10559 0x1f51, 0x1003) 10560 }, 10561 { 10562 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10563 0x1f51, 0x1004) 10564 }, 10565 { 10566 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10567 0x1f51, 0x1005) 10568 }, 10569 { 10570 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10571 0x1f51, 0x1006) 10572 }, 10573 { 10574 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10575 0x1f51, 0x1007) 10576 }, 10577 { 10578 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10579 0x1f51, 0x1008) 10580 }, 10581 { 10582 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10583 0x1f51, 0x1009) 10584 }, 10585 { 10586 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, 10587 0x1f51, 0x100a) 10588 }, 10589 { 10590 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10591 0x1f51, 0x100b) 10592 }, 10593 { 10594 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10595 0x1f51, 0x100e) 10596 }, 10597 { 10598 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10599 0x1f51, 0x100f) 10600 }, 10601 { 10602 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10603 0x1f51, 0x1010) 10604 }, 10605 { 10606 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10607 0x1f51, 0x1011) 10608 }, 10609 { 10610 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10611 0x1f51, 0x1043) 10612 }, 10613 { 10614 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10615 0x1f51, 0x1044) 10616 }, 10617 { 10618 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10619 0x1f51, 0x1045) 10620 }, 10621 { 10622 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10623 0x1ff9, 0x00a3) 10624 }, 10625 { 10626 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10627 PCI_ANY_ID, PCI_ANY_ID) 10628 }, 10629 { 0 } 10630 }; 10631 10632 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 10633 10634 static struct pci_driver pqi_pci_driver = { 10635 .name = DRIVER_NAME_SHORT, 10636 .id_table = pqi_pci_id_table, 10637 .probe = pqi_pci_probe, 10638 .remove = pqi_pci_remove, 10639 .shutdown = pqi_shutdown, 10640 #if defined(CONFIG_PM) 10641 .driver = { 10642 .pm = &pqi_pm_ops 10643 }, 10644 #endif 10645 }; 10646 10647 static int __init pqi_init(void) 10648 { 10649 int rc; 10650 10651 pr_info(DRIVER_NAME "\n"); 10652 pqi_verify_structures(); 10653 sis_verify_structures(); 10654 10655 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); 10656 if (!pqi_sas_transport_template) 10657 return -ENODEV; 10658 10659 pqi_process_module_params(); 10660 10661 rc = pci_register_driver(&pqi_pci_driver); 10662 if (rc) 10663 sas_release_transport(pqi_sas_transport_template); 10664 10665 return rc; 10666 } 10667 10668 static void __exit pqi_cleanup(void) 10669 { 10670 pci_unregister_driver(&pqi_pci_driver); 10671 sas_release_transport(pqi_sas_transport_template); 10672 } 10673 10674 module_init(pqi_init); 10675 module_exit(pqi_cleanup); 10676 10677 static void pqi_verify_structures(void) 10678 { 10679 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10680 sis_host_to_ctrl_doorbell) != 0x20); 10681 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10682 sis_interrupt_mask) != 0x34); 10683 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10684 sis_ctrl_to_host_doorbell) != 0x9c); 10685 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10686 sis_ctrl_to_host_doorbell_clear) != 0xa0); 10687 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10688 sis_driver_scratch) != 0xb0); 10689 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10690 sis_product_identifier) != 0xb4); 10691 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10692 sis_firmware_status) != 0xbc); 10693 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10694 sis_ctrl_shutdown_reason_code) != 0xcc); 10695 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10696 sis_mailbox) != 0x1000); 10697 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10698 pqi_registers) != 0x4000); 10699 10700 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10701 iu_type) != 0x0); 10702 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10703 iu_length) != 0x2); 10704 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10705 response_queue_id) != 0x4); 10706 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10707 driver_flags) != 0x6); 10708 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 10709 10710 BUILD_BUG_ON(offsetof(struct 
pqi_aio_error_info, 10711 status) != 0x0); 10712 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10713 service_response) != 0x1); 10714 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10715 data_present) != 0x2); 10716 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10717 reserved) != 0x3); 10718 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10719 residual_count) != 0x4); 10720 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10721 data_length) != 0x8); 10722 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10723 reserved1) != 0xa); 10724 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10725 data) != 0xc); 10726 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 10727 10728 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10729 data_in_result) != 0x0); 10730 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10731 data_out_result) != 0x1); 10732 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10733 reserved) != 0x2); 10734 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10735 status) != 0x5); 10736 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10737 status_qualifier) != 0x6); 10738 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10739 sense_data_length) != 0x8); 10740 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10741 response_data_length) != 0xa); 10742 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10743 data_in_transferred) != 0xc); 10744 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10745 data_out_transferred) != 0x10); 10746 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10747 data) != 0x14); 10748 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 10749 10750 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10751 signature) != 0x0); 10752 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10753 function_and_status_code) != 0x8); 10754 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10755 max_admin_iq_elements) != 0x10); 10756 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10757 max_admin_oq_elements) != 0x11); 10758 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10759 admin_iq_element_length) != 0x12); 10760 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10761 admin_oq_element_length) != 0x13); 10762 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10763 max_reset_timeout) != 0x14); 10764 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10765 legacy_intx_status) != 0x18); 10766 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10767 legacy_intx_mask_set) != 0x1c); 10768 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10769 legacy_intx_mask_clear) != 0x20); 10770 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10771 device_status) != 0x40); 10772 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10773 admin_iq_pi_offset) != 0x48); 10774 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10775 admin_oq_ci_offset) != 0x50); 10776 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10777 admin_iq_element_array_addr) != 0x58); 10778 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10779 admin_oq_element_array_addr) != 0x60); 10780 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10781 admin_iq_ci_addr) != 0x68); 10782 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10783 admin_oq_pi_addr) != 0x70); 10784 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10785 admin_iq_num_elements) != 0x78); 10786 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10787 admin_oq_num_elements) != 0x79); 10788 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10789 admin_queue_int_msg_num) != 0x7a); 10790 
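	/* error reporting, reset, and power control registers */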
BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10791 device_error) != 0x80); 10792 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10793 error_details) != 0x88); 10794 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10795 device_reset) != 0x90); 10796 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10797 power_action) != 0x94); 10798 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 10799 10800 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10801 header.iu_type) != 0); 10802 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10803 header.iu_length) != 2); 10804 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10805 header.driver_flags) != 6); 10806 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10807 request_id) != 8); 10808 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10809 function_code) != 10); 10810 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10811 data.report_device_capability.buffer_length) != 44); 10812 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10813 data.report_device_capability.sg_descriptor) != 48); 10814 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10815 data.create_operational_iq.queue_id) != 12); 10816 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10817 data.create_operational_iq.element_array_addr) != 16); 10818 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10819 data.create_operational_iq.ci_addr) != 24); 10820 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10821 data.create_operational_iq.num_elements) != 32); 10822 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10823 data.create_operational_iq.element_length) != 34); 10824 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10825 data.create_operational_iq.queue_protocol) != 36); 10826 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10827 data.create_operational_oq.queue_id) != 12); 10828 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10829 data.create_operational_oq.element_array_addr) != 16); 10830 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10831 data.create_operational_oq.pi_addr) != 24); 10832 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10833 data.create_operational_oq.num_elements) != 32); 10834 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10835 data.create_operational_oq.element_length) != 34); 10836 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10837 data.create_operational_oq.queue_protocol) != 36); 10838 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10839 data.create_operational_oq.int_msg_num) != 40); 10840 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10841 data.create_operational_oq.coalescing_count) != 42); 10842 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10843 data.create_operational_oq.min_coalescing_time) != 44); 10844 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10845 data.create_operational_oq.max_coalescing_time) != 48); 10846 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10847 data.delete_operational_queue.queue_id) != 12); 10848 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 10849 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10850 data.create_operational_iq) != 64 - 11); 10851 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10852 data.create_operational_oq) != 64 - 11); 10853 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10854 data.delete_operational_queue) != 64 - 11); 10855 10856 
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10857 header.iu_type) != 0); 10858 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10859 header.iu_length) != 2); 10860 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10861 header.driver_flags) != 6); 10862 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10863 request_id) != 8); 10864 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10865 function_code) != 10); 10866 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10867 status) != 11); 10868 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10869 data.create_operational_iq.status_descriptor) != 12); 10870 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10871 data.create_operational_iq.iq_pi_offset) != 16); 10872 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10873 data.create_operational_oq.status_descriptor) != 12); 10874 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10875 data.create_operational_oq.oq_ci_offset) != 16); 10876 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 10877 10878 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10879 header.iu_type) != 0); 10880 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10881 header.iu_length) != 2); 10882 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10883 header.response_queue_id) != 4); 10884 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10885 header.driver_flags) != 6); 10886 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10887 request_id) != 8); 10888 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10889 nexus_id) != 10); 10890 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10891 buffer_length) != 12); 10892 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10893 lun_number) != 16); 10894 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10895 protocol_specific) != 24); 10896 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10897 error_index) != 27); 10898 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10899 cdb) != 32); 10900 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10901 timeout) != 60); 10902 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10903 sg_descriptors) != 64); 10904 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 10905 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10906 10907 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10908 header.iu_type) != 0); 10909 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10910 header.iu_length) != 2); 10911 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10912 header.response_queue_id) != 4); 10913 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10914 header.driver_flags) != 6); 10915 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10916 request_id) != 8); 10917 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10918 nexus_id) != 12); 10919 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10920 buffer_length) != 16); 10921 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10922 data_encryption_key_index) != 22); 10923 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10924 encrypt_tweak_lower) != 24); 10925 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10926 encrypt_tweak_upper) != 28); 10927 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10928 cdb) != 32); 10929 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10930 error_index) != 48); 10931 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10932 num_sg_descriptors) != 50); 10933 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10934 cdb_length) != 51); 
10935 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10936 lun_number) != 52); 10937 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10938 sg_descriptors) != 64); 10939 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 10940 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10941 10942 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10943 header.iu_type) != 0); 10944 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10945 header.iu_length) != 2); 10946 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10947 request_id) != 8); 10948 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10949 error_index) != 10); 10950 10951 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10952 header.iu_type) != 0); 10953 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10954 header.iu_length) != 2); 10955 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10956 header.response_queue_id) != 4); 10957 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10958 request_id) != 8); 10959 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10960 data.report_event_configuration.buffer_length) != 12); 10961 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10962 data.report_event_configuration.sg_descriptors) != 16); 10963 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10964 data.set_event_configuration.global_event_oq_id) != 10); 10965 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10966 data.set_event_configuration.buffer_length) != 12); 10967 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10968 data.set_event_configuration.sg_descriptors) != 16); 10969 10970 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10971 max_inbound_iu_length) != 6); 10972 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10973 max_outbound_iu_length) != 14); 10974 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 10975 10976 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10977 data_length) != 0); 10978 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10979 iq_arbitration_priority_support_bitmask) != 8); 10980 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10981 maximum_aw_a) != 9); 10982 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10983 maximum_aw_b) != 10); 10984 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10985 maximum_aw_c) != 11); 10986 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10987 max_inbound_queues) != 16); 10988 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10989 max_elements_per_iq) != 18); 10990 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10991 max_iq_element_length) != 24); 10992 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10993 min_iq_element_length) != 26); 10994 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10995 max_outbound_queues) != 30); 10996 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10997 max_elements_per_oq) != 32); 10998 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10999 intr_coalescing_time_granularity) != 34); 11000 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 11001 max_oq_element_length) != 36); 11002 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 11003 min_oq_element_length) != 38); 11004 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 11005 iu_layer_descriptors) != 64); 11006 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 11007 11008 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 11009 event_type) != 0); 11010 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 11011 oq_id) != 2); 11012 
BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 11013 11014 BUILD_BUG_ON(offsetof(struct pqi_event_config, 11015 num_event_descriptors) != 2); 11016 BUILD_BUG_ON(offsetof(struct pqi_event_config, 11017 descriptors) != 4); 11018 11019 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 11020 ARRAY_SIZE(pqi_supported_event_types)); 11021 11022 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11023 header.iu_type) != 0); 11024 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11025 header.iu_length) != 2); 11026 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11027 event_type) != 8); 11028 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11029 event_id) != 10); 11030 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11031 additional_event_id) != 12); 11032 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11033 data) != 16); 11034 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 11035 11036 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11037 header.iu_type) != 0); 11038 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11039 header.iu_length) != 2); 11040 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11041 event_type) != 8); 11042 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11043 event_id) != 10); 11044 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11045 additional_event_id) != 12); 11046 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 11047 11048 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11049 header.iu_type) != 0); 11050 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11051 header.iu_length) != 2); 11052 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11053 request_id) != 8); 11054 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11055 nexus_id) != 10); 11056 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11057 timeout) != 14); 11058 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11059 lun_number) != 16); 11060 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11061 protocol_specific) != 24); 11062 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11063 outbound_queue_id_to_manage) != 26); 11064 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11065 request_id_to_manage) != 28); 11066 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11067 task_management_function) != 30); 11068 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 11069 11070 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11071 header.iu_type) != 0); 11072 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11073 header.iu_length) != 2); 11074 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11075 request_id) != 8); 11076 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11077 nexus_id) != 10); 11078 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11079 additional_response_info) != 12); 11080 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11081 response_code) != 15); 11082 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 11083 11084 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11085 configured_logical_drive_count) != 0); 11086 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11087 configuration_signature) != 1); 11088 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11089 firmware_version_short) != 5); 11090 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11091 extended_logical_unit_count) != 154); 11092 
BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11093 firmware_build_number) != 190); 11094 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11095 vendor_id) != 200); 11096 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11097 product_id) != 208); 11098 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11099 extra_controller_flags) != 286); 11100 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11101 controller_mode) != 292); 11102 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11103 spare_part_number) != 293); 11104 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11105 firmware_version_long) != 325); 11106 11107 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11108 phys_bay_in_box) != 115); 11109 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11110 device_type) != 120); 11111 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11112 redundant_path_present_map) != 1736); 11113 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11114 active_path_number) != 1738); 11115 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11116 alternate_paths_phys_connector) != 1739); 11117 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11118 alternate_paths_phys_box_on_port) != 1755); 11119 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11120 current_queue_depth_limit) != 1796); 11121 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 11122 11123 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); 11124 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11125 page_code) != 0); 11126 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11127 subpage_code) != 1); 11128 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11129 buffer_length) != 2); 11130 11131 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); 11132 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11133 page_code) != 0); 11134 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11135 subpage_code) != 1); 11136 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11137 page_length) != 2); 11138 11139 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) 11140 != 18); 11141 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11142 header) != 0); 11143 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11144 firmware_read_support) != 4); 11145 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11146 driver_read_support) != 5); 11147 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11148 firmware_write_support) != 6); 11149 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11150 driver_write_support) != 7); 11151 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11152 max_transfer_encrypted_sas_sata) != 8); 11153 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11154 max_transfer_encrypted_nvme) != 10); 11155 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11156 max_write_raid_5_6) != 12); 11157 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11158 max_write_raid_1_10_2drive) != 14); 11159 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11160 max_write_raid_1_10_3drive) != 16); 11161 11162 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 11163 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 11164 
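	/*
	 * Queue element lengths are carried in 16-bit fields expressed in
	 * 16-byte units, so the largest supported element length is
	 * 65535 * 16 == 1048560 bytes, and every length must be a multiple
	 * of PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT.
	 */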
BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 11165 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11166 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 11167 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11168 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 11169 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 11170 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11171 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 11172 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 11173 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11174 11175 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 11176 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 11177 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 11178 } 11179