// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	"bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid, hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
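/*
 * Note on the tag mapping (illustrative sketch, not driver API): the
 * driver-visible host_tag is the block layer tag plus one, so the
 * reverse lookup in mpi3mr_scmd_from_host_tag() below rebuilds the
 * unique tag as:
 *
 *	unique_tag = (host_tag - 1) | (qidx << BLK_MQ_UNIQUE_TAG_BITS);
 *
 * which is the inverse of blk_mq_unique_tag_to_tag()/_to_hwq() used
 * above, assuming qidx matches the hardware queue the command was
 * issued on.
 */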
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as no longer in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref incrementer
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrementer
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
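/*
 * Reference counting model for firmware events (summary drawn from
 * the code below): an event starts at refcount 1 from kref_init();
 * mpi3mr_fwevt_add_to_list() takes one extra reference for the
 * fwevt_list and one for the queued work. The list reference is
 * dropped on dequeue/delete, the work reference when the worker (or
 * cancel_work_sync() in mpi3mr_cancel_work()) is done with it, and
 * the initial reference once processing completes in
 * mpi3mr_process_fwevt(), after which mpi3mr_fwevt_free() releases
 * the memory.
 */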
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
 * the list
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * Add the given hdb trigger data event to the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(*event_data);

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
		return;
	}

	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	memcpy(fwevt->event_data, event_data, sz);

	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset is invoked as part of processing
		 * the same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through device info change event
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
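/*
 * Worked example of the reduction formula above (illustrative only):
 * with a firmware queue depth tg->fw_qd of 128 and tg->qd_reduction
 * of 2, the modified depth becomes max((128 * 2) / 10, 8) = 25, i.e.
 * qd_reduction is interpreted in tenths of the firmware QD, floored
 * at a minimum depth of 8.
 */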
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset, prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}
/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device(lun) then device specific pending I/O counter
 * is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then target specific pending I/O counter is
 * updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}
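/*
 * Usage pattern for the request iterators above (sketch, assuming the
 * standard blk-mq API): callers pass one of the mpi3mr_count_* or
 * mpi3mr_*_scmd callbacks to blk_mq_tagset_busy_iter() along with a
 * context pointer, e.g.
 *
 *	sdev_priv_data->pend_count = 0;
 *	blk_mq_tagset_busy_iter(&shost->tag_set,
 *	    mpi3mr_count_dev_pending, (void *)sdev);
 *
 * Each callback returns true so the iteration always visits every
 * in-flight request; filtering happens inside the callback.
 */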
/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdev to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdev from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set io_divert flag for each device associated
 * with the given throttle group with the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
"addition" : "removal")); 889 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 890 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 891 } 892 893 /** 894 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 895 * @mrioc: Adapter instance reference 896 * @tgtdev: Target device structure 897 * 898 * Checks whether the device is exposed to upper layers and if it 899 * is then remove the device from upper layers by calling 900 * scsi_remove_target(). 901 * 902 * Return: 0 on success, non zero on failure. 903 */ 904 void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, 905 struct mpi3mr_tgt_dev *tgtdev) 906 { 907 struct mpi3mr_stgt_priv_data *tgt_priv; 908 909 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", 910 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 911 if (tgtdev->starget && tgtdev->starget->hostdata) { 912 tgt_priv = tgtdev->starget->hostdata; 913 atomic_set(&tgt_priv->block_io, 0); 914 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 915 } 916 917 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 918 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { 919 if (tgtdev->starget) { 920 if (mrioc->current_event) 921 mrioc->current_event->pending_at_sml = 1; 922 scsi_remove_target(&tgtdev->starget->dev); 923 tgtdev->host_exposed = 0; 924 if (mrioc->current_event) { 925 mrioc->current_event->pending_at_sml = 0; 926 if (mrioc->current_event->discard) { 927 mpi3mr_print_device_event_notice(mrioc, 928 false); 929 return; 930 } 931 } 932 } 933 } else 934 mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); 935 mpi3mr_global_trigger(mrioc, 936 MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED); 937 938 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", 939 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 940 } 941 942 /** 943 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers 944 * @mrioc: Adapter instance reference 945 * @perst_id: Persistent ID of the device 946 * 947 * Checks whether the device can be exposed to upper layers and 948 * if it is not then expose the device to upper layers by 949 * calling scsi_scan_target(). 950 * 951 * Return: 0 on success, non zero on failure. 
/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and, if it is not already exposed, exposes it by calling
 * scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
		dprint_event_bh(mrioc,
		    "exposed target device with handle(0x%04x), perst_id(%d)\n",
		    tgtdev->dev_handle, perst_id);
		goto out;
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;

	lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
	lim->virt_boundary_mask = (1 << pgsz) - 1;
}

static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
	    (tgt_dev->dev_spec.pcie_inf.dev_info &
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
		mpi3mr_configure_nvme_dev(tgt_dev, lim);
}
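/*
 * Example of the limit conversion above (illustrative): an NVMe
 * device reporting mdts of 1 MiB (1048576 bytes) yields
 * max_hw_sectors = 1048576 / 512 = 2048 sectors, and a page size
 * exponent pgsz of 12 (4 KiB) yields virt_boundary_mask = 0xfff,
 * i.e. scatter/gather segments must not cross a 4 KiB boundary.
 */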
/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct queue_limits lim;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);

	lim = queue_limits_start_update(sdev->request_queue);
	mpi3mr_configure_tgt_dev(tgtdev, &lim);
	WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}

/**
 * mpi3mr_refresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any devices
 * that went missing during the reset and remove them from the
 * upper layers, or to expose any newly detected device to the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		     tgtdev->is_hidden) &&
		     tgtdev->host_exposed && tgtdev->starget &&
		     tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}
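/*
 * Summary of mpi3mr_refresh_tgtdevs() above (descriptive note): the
 * refresh runs three passes over tgtdev_list: (1) mark devices that
 * disappeared or became hidden during the reset as removed and
 * unblock their I/O, (2) detach those devices from the upper layers
 * and drop invalid-handle devices from the internal list, and (3)
 * expose newly visible devices or refresh queue depth/limits on
 * devices that remain.
 */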
/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) &&
		    (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}
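/*
 * Note on the VD throttle thresholds above (assumption inferred from
 * the conversion): the firmware appears to report
 * io_throttle_group_high/_low in MiB while the driver tracks
 * outstanding I/O in 512-byte sectors, so the values are scaled by
 * 2048 (1 MiB / 512 bytes) before being cached in tg->high and
 * tg->low.
 */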
1334 * 1335 * Process Device status Change event and based on device's new 1336 * information, either expose the device to the upper layers, or 1337 * remove the device from upper layers. 1338 * 1339 * Return: Nothing. 1340 */ 1341 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, 1342 struct mpi3mr_fwevt *fwevt) 1343 { 1344 u16 dev_handle = 0; 1345 u8 uhide = 0, delete = 0, cleanup = 0; 1346 struct mpi3mr_tgt_dev *tgtdev = NULL; 1347 struct mpi3_event_data_device_status_change *evtdata = 1348 (struct mpi3_event_data_device_status_change *)fwevt->event_data; 1349 1350 dev_handle = le16_to_cpu(evtdata->dev_handle); 1351 dprint_event_bh(mrioc, 1352 "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n", 1353 dev_handle, evtdata->reason_code); 1354 switch (evtdata->reason_code) { 1355 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 1356 delete = 1; 1357 break; 1358 case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN: 1359 uhide = 1; 1360 break; 1361 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 1362 delete = 1; 1363 cleanup = 1; 1364 break; 1365 default: 1366 ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__, 1367 evtdata->reason_code); 1368 break; 1369 } 1370 1371 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1372 if (!tgtdev) { 1373 dprint_event_bh(mrioc, 1374 "processing device status change event bottom half,\n" 1375 "cannot identify target device for handle(0x%04x), rc(0x%02x)\n", 1376 dev_handle, evtdata->reason_code); 1377 goto out; 1378 } 1379 if (uhide) { 1380 tgtdev->is_hidden = 0; 1381 if (!tgtdev->host_exposed) 1382 mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); 1383 } 1384 1385 if (delete) 1386 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1387 1388 if (cleanup) { 1389 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 1390 mpi3mr_tgtdev_put(tgtdev); 1391 } 1392 1393 out: 1394 if (tgtdev) 1395 mpi3mr_tgtdev_put(tgtdev); 1396 } 1397 1398 /** 1399 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf 1400 * @mrioc: Adapter instance reference 1401 * @dev_pg0: New device page0 1402 * 1403 * Process Device Info Change event and based on device's new 1404 * information, either expose the device to the upper layers, or 1405 * remove the device from upper layers or update the details of 1406 * the device. 1407 * 1408 * Return: Nothing. 
/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	dprint_event_bh(mrioc,
	    "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n",
	    dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_event_bh(mrioc,
		    "cannot identify target device for device info\n"
		    "change event handle(0x%04x), perst_id(%d)\n",
		    dev_handle, perst_id);
		goto out;
	}
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then returns the
 * enclosure object.
1468 * 1469 * Return: Enclosure object reference or NULL 1470 */ 1471 struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle( 1472 struct mpi3mr_ioc *mrioc, u16 handle) 1473 { 1474 struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL; 1475 1476 list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) { 1477 if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle) 1478 continue; 1479 r = enclosure_dev; 1480 goto out; 1481 } 1482 out: 1483 return r; 1484 } 1485 1486 /** 1487 * mpi3mr_process_trigger_data_event_bh - Process trigger event 1488 * data 1489 * @mrioc: Adapter instance reference 1490 * @event_data: Event data 1491 * 1492 * This function releases diage buffers or issues diag fault 1493 * based on trigger conditions 1494 * 1495 * Return: Nothing 1496 */ 1497 static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc, 1498 struct trigger_event_data *event_data) 1499 { 1500 struct diag_buffer_desc *trace_hdb = event_data->trace_hdb; 1501 struct diag_buffer_desc *fw_hdb = event_data->fw_hdb; 1502 unsigned long flags; 1503 int retval = 0; 1504 u8 trigger_type = event_data->trigger_type; 1505 union mpi3mr_trigger_data *trigger_data = 1506 &event_data->trigger_specific_data; 1507 1508 if (event_data->snapdump) { 1509 if (trace_hdb) 1510 mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type, 1511 trigger_data, 1); 1512 if (fw_hdb) 1513 mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type, 1514 trigger_data, 1); 1515 mpi3mr_soft_reset_handler(mrioc, 1516 MPI3MR_RESET_FROM_TRIGGER, 1); 1517 return; 1518 } 1519 1520 if (trace_hdb) { 1521 retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb); 1522 if (!retval) { 1523 mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type, 1524 trigger_data, 1); 1525 } 1526 spin_lock_irqsave(&mrioc->trigger_lock, flags); 1527 mrioc->trace_release_trigger_active = false; 1528 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 1529 } 1530 if (fw_hdb) { 1531 retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb); 1532 if (!retval) { 1533 mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type, 1534 trigger_data, 1); 1535 } 1536 spin_lock_irqsave(&mrioc->trigger_lock, flags); 1537 mrioc->fw_release_trigger_active = false; 1538 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 1539 } 1540 } 1541 1542 /** 1543 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event 1544 * @mrioc: Adapter instance reference 1545 * @encl_pg0: Enclosure page 0. 1546 * @is_added: Added event or not 1547 * 1548 * Return nothing. 
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and adds or removes
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
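/*
 * Note on the link_rate decode above (descriptive, per the usage in
 * this file): each phy_entry's link_rate byte packs two 4-bit fields,
 * the current rate in the high nibble (link_rate >> 4) and the
 * previous rate in the low nibble (link_rate & 0xF); the same decode
 * is repeated in mpi3mr_sastopochg_evt_bh() below.
 */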
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}

/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
1812 */ 1813 static void 1814 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1815 struct mpi3_event_data_pcie_topology_change_list *event_data) 1816 { 1817 int i; 1818 u16 handle; 1819 u16 reason_code; 1820 u8 port_number; 1821 char *status_str = NULL; 1822 u8 link_rate, prev_link_rate; 1823 1824 switch (event_data->switch_status) { 1825 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1826 status_str = "remove"; 1827 break; 1828 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1829 status_str = "responding"; 1830 break; 1831 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1832 status_str = "remove delay"; 1833 break; 1834 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1835 status_str = "direct attached"; 1836 break; 1837 default: 1838 status_str = "unknown status"; 1839 break; 1840 } 1841 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1842 __func__, status_str); 1843 ioc_info(mrioc, 1844 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1845 __func__, le16_to_cpu(event_data->switch_dev_handle), 1846 le16_to_cpu(event_data->enclosure_handle), 1847 event_data->start_port_num, event_data->num_entries); 1848 for (i = 0; i < event_data->num_entries; i++) { 1849 handle = 1850 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1851 if (!handle) 1852 continue; 1853 port_number = event_data->start_port_num + i; 1854 reason_code = event_data->port_entry[i].port_status; 1855 switch (reason_code) { 1856 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1857 status_str = "target remove"; 1858 break; 1859 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1860 status_str = "delay target remove"; 1861 break; 1862 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1863 status_str = "link status change"; 1864 break; 1865 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1866 status_str = "link status no change"; 1867 break; 1868 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1869 status_str = "target responding"; 1870 break; 1871 default: 1872 status_str = "unknown"; 1873 break; 1874 } 1875 link_rate = event_data->port_entry[i].current_port_info & 1876 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1877 prev_link_rate = event_data->port_entry[i].previous_port_info & 1878 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1879 ioc_info(mrioc, 1880 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1881 __func__, port_number, handle, status_str, link_rate, 1882 prev_link_rate); 1883 } 1884 } 1885 1886 /** 1887 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1888 * @mrioc: Adapter instance reference 1889 * @fwevt: Firmware event reference 1890 * 1891 * Prints information about the PCIe topology change event and 1892 * for "not responding" event code, removes the device from the 1893 * upper layers. 1894 * 1895 * Return: Nothing. 
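 *
 * Note: unlike the SAS bottom half, only the "not responding" reason
 * code requires action here; PCIe link status changes are reported by
 * the debug helper but need no device state update.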
1896 */
1897 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1898 struct mpi3mr_fwevt *fwevt)
1899 {
1900 struct mpi3_event_data_pcie_topology_change_list *event_data =
1901 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
1902 int i;
1903 u16 handle;
1904 u8 reason_code;
1905 struct mpi3mr_tgt_dev *tgtdev = NULL;
1906
1907 mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
1908
1909 for (i = 0; i < event_data->num_entries; i++) {
1910 if (fwevt->discard)
1911 return;
1912 handle =
1913 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1914 if (!handle)
1915 continue;
1916 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1917 if (!tgtdev)
1918 continue;
1919
1920 reason_code = event_data->port_entry[i].port_status;
1921
1922 switch (reason_code) {
1923 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1924 if (tgtdev->host_exposed)
1925 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1926 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1927 mpi3mr_tgtdev_put(tgtdev);
1928 break;
1929 default:
1930 break;
1931 }
1932 if (tgtdev)
1933 mpi3mr_tgtdev_put(tgtdev);
1934 }
1935 }
1936
1937 /**
1938 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
1939 * @mrioc: Adapter instance reference
1940 * @fwevt: Firmware event reference
1941 *
1942 * Extracts the event data and calls the application interfacing
1943 * function to process the event further.
1944 *
1945 * Return: Nothing.
1946 */
1947 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
1948 struct mpi3mr_fwevt *fwevt)
1949 {
1950 mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
1951 fwevt->event_data_size);
1952 }
1953
1954 /**
1955 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
1956 * @sdev: SCSI device reference
1957 * @data: Queue depth reference
1958 *
1959 * This is an iterator function called for each SCSI device in a
1960 * target to update the QD of each SCSI device.
1961 *
1962 * Return: Nothing.
1963 */
1964 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
1965 {
1966 u16 *q_depth = (u16 *)data;
1967
1968 scsi_change_queue_depth(sdev, (int)*q_depth);
1969 sdev->max_queue_depth = sdev->queue_depth;
1970 }
1971
1972 /**
1973 * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
1974 * @mrioc: Adapter instance reference
1975 * @tg: Throttle group information pointer
1976 *
1977 * Reduces the QD of each device associated with the
1978 * given throttle group.
1979 *
1980 * Return: None.
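 *
 * Note: the target device list is walked under tgtdev_lock, and the
 * per-device change is applied through starget_for_each_device() with
 * mpi3mr_update_sdev_qd() as the iterator callback.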
1981 */
1982 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
1983 struct mpi3mr_throttle_group_info *tg)
1984 {
1985 unsigned long flags;
1986 struct mpi3mr_tgt_dev *tgtdev;
1987 struct mpi3mr_stgt_priv_data *tgt_priv;
1988
1989
1990 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1991 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1992 if (tgtdev->starget && tgtdev->starget->hostdata) {
1993 tgt_priv = tgtdev->starget->hostdata;
1994 if (tgt_priv->throttle_group == tg) {
1995 dprint_event_bh(mrioc,
1996 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
1997 tgt_priv->perst_id, tgtdev->q_depth,
1998 tg->modified_qd);
1999 starget_for_each_device(tgtdev->starget,
2000 (void *)&tg->modified_qd,
2001 mpi3mr_update_sdev_qd);
2002 }
2003 }
2004 }
2005 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2006 }
2007
2008 /**
2009 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
2010 * @mrioc: Adapter instance reference
2011 * @fwevt: Firmware event reference
2012 *
2013 * Identifies the firmware event and calls the corresponding bottom
2014 * half handler and sends an event acknowledgment if required.
2015 *
2016 * Return: Nothing.
2017 */
2018 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
2019 struct mpi3mr_fwevt *fwevt)
2020 {
2021 struct mpi3_device_page0 *dev_pg0 = NULL;
2022 u16 perst_id, handle, dev_info;
2023 struct mpi3_device0_sas_sata_format *sasinf = NULL;
2024 unsigned int timeout;
2025
2026 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
2027 mrioc->current_event = fwevt;
2028
2029 if (mrioc->stop_drv_processing) {
2030 dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler"
2031 " due to stop_drv_processing\n", fwevt->event_id);
2032 goto out;
2033 }
2034
2035 if (mrioc->unrecoverable) {
2036 dprint_event_bh(mrioc,
2037 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
2038 fwevt->event_id);
2039 goto out;
2040 }
2041
2042 if (!fwevt->process_evt)
2043 goto evt_ack;
2044
2045 dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n",
2046 fwevt->event_id);
2047
2048 switch (fwevt->event_id) {
2049 case MPI3_EVENT_DEVICE_ADDED:
2050 {
2051 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2052 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2053 handle = le16_to_cpu(dev_pg0->dev_handle);
2054 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2055 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
2056 else if (mrioc->sas_transport_enabled &&
2057 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
2058 sasinf = &dev_pg0->device_specific.sas_sata_format;
2059 dev_info = le16_to_cpu(sasinf->device_info);
2060 if (!mrioc->sas_hba.num_phys)
2061 mpi3mr_sas_host_add(mrioc);
2062 else
2063 mpi3mr_sas_host_refresh(mrioc);
2064
2065 if (mpi3mr_is_expander_device(dev_info))
2066 mpi3mr_expander_add(mrioc, handle);
2067 }
2068 break;
2069 }
2070 case MPI3_EVENT_DEVICE_INFO_CHANGED:
2071 {
2072 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2073 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2074 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2075 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
2076 break;
2077 }
2078 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2079 {
2080 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
2081 break;
2082 }
2083 case MPI3_EVENT_ENCL_DEVICE_ADDED:
2084 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
2085 {
2086 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
2087 break;
2088 }
2089
2090 case
MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2091 { 2092 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 2093 break; 2094 } 2095 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2096 { 2097 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 2098 break; 2099 } 2100 case MPI3_EVENT_LOG_DATA: 2101 { 2102 mpi3mr_logdata_evt_bh(mrioc, fwevt); 2103 break; 2104 } 2105 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 2106 { 2107 struct mpi3mr_throttle_group_info *tg; 2108 2109 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 2110 dprint_event_bh(mrioc, 2111 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 2112 tg->id, tg->need_qd_reduction); 2113 if (tg->need_qd_reduction) { 2114 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 2115 tg->need_qd_reduction = 0; 2116 } 2117 break; 2118 } 2119 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 2120 { 2121 timeout = MPI3MR_RESET_TIMEOUT * 2; 2122 while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) && 2123 !mrioc->unrecoverable && !mrioc->pci_err_recovery) { 2124 msleep(500); 2125 if (!timeout--) { 2126 mrioc->unrecoverable = 1; 2127 break; 2128 } 2129 } 2130 2131 if (mrioc->unrecoverable || mrioc->pci_err_recovery) 2132 break; 2133 2134 dprint_event_bh(mrioc, 2135 "scan for non responding and newly added devices after soft reset started\n"); 2136 if (mrioc->sas_transport_enabled) { 2137 mpi3mr_refresh_sas_ports(mrioc); 2138 mpi3mr_refresh_expanders(mrioc); 2139 } 2140 mpi3mr_refresh_tgtdevs(mrioc); 2141 ioc_info(mrioc, 2142 "scan for non responding and newly added devices after soft reset completed\n"); 2143 break; 2144 } 2145 case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER: 2146 { 2147 mpi3mr_process_trigger_data_event_bh(mrioc, 2148 (struct trigger_event_data *)fwevt->event_data); 2149 break; 2150 } 2151 default: 2152 break; 2153 } 2154 2155 evt_ack: 2156 if (fwevt->send_ack) 2157 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2158 fwevt->evt_ctx); 2159 out: 2160 /* Put fwevt reference count to neutralize kref_init increment */ 2161 mpi3mr_fwevt_put(fwevt); 2162 mrioc->current_event = NULL; 2163 } 2164 2165 /** 2166 * mpi3mr_fwevt_worker - Firmware event worker 2167 * @work: Work struct containing firmware event 2168 * 2169 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2170 * 2171 * Return: Nothing. 2172 */ 2173 static void mpi3mr_fwevt_worker(struct work_struct *work) 2174 { 2175 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2176 work); 2177 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2178 /* 2179 * Put fwevt reference count after 2180 * dequeuing it from worker queue 2181 */ 2182 mpi3mr_fwevt_put(fwevt); 2183 } 2184 2185 /** 2186 * mpi3mr_create_tgtdev - Create and add a target device 2187 * @mrioc: Adapter instance reference 2188 * @dev_pg0: Device Page 0 data 2189 * 2190 * If the device specified by the device page 0 data is not 2191 * present in the driver's internal list, allocate the memory 2192 * for the device, populate the data and add to the list, else 2193 * update the device data. The key is persistent ID. 
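 * A device that is already on the list is marked MPI3MR_DEV_CREATED
 * and refreshed in place rather than reallocated.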
2194 *
2195 * Return: 0 on success, -ENOMEM on memory allocation failure
2196 */
2197 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
2198 struct mpi3_device_page0 *dev_pg0)
2199 {
2200 int retval = 0;
2201 struct mpi3mr_tgt_dev *tgtdev = NULL;
2202 u16 perst_id = 0;
2203 unsigned long flags;
2204
2205 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2206 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
2207 return retval;
2208
2209 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2210 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
2211 if (tgtdev)
2212 tgtdev->state = MPI3MR_DEV_CREATED;
2213 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2214
2215 if (tgtdev) {
2216 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2217 mpi3mr_tgtdev_put(tgtdev);
2218 } else {
2219 tgtdev = mpi3mr_alloc_tgtdev();
2220 if (!tgtdev)
2221 return -ENOMEM;
2222 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2223 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
2224 }
2225
2226 return retval;
2227 }
2228
2229 /**
2230 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
2231 * @mrioc: Adapter instance reference
2232 *
2233 * Flushes pending commands in the delayed lists as a cleanup
2234 * during a controller reset or driver removal.
2235 *
2236 * Return: Nothing
2237 */
2238 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
2239 {
2240 struct delayed_dev_rmhs_node *_rmhs_node;
2241 struct delayed_evt_ack_node *_evtack_node;
2242
2243 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
2244 while (!list_empty(&mrioc->delayed_rmhs_list)) {
2245 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
2246 struct delayed_dev_rmhs_node, list);
2247 list_del(&_rmhs_node->list);
2248 kfree(_rmhs_node);
2249 }
2250 dprint_reset(mrioc, "flushing delayed event ack commands\n");
2251 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2252 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
2253 struct delayed_evt_ack_node, list);
2254 list_del(&_evtack_node->list);
2255 kfree(_evtack_node);
2256 }
2257 }
2258
2259 /**
2260 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
2261 * @mrioc: Adapter instance reference
2262 * @drv_cmd: Internal command tracker
2263 *
2264 * Issues a target reset TM to the firmware from the device
2265 * removal TM pend list or retries the removal handshake sequence
2266 * based on the IOU control request IOC status.
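 * A failed handshake is retried up to MPI3MR_DEV_RMHS_RETRY_COUNT
 * times; on success the handle is cleared from removepend_bitmap.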
2267 *
2268 * Return: Nothing
2269 */
2270 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
2271 struct mpi3mr_drv_cmd *drv_cmd)
2272 {
2273 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2274 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2275
2276 if (drv_cmd->state & MPI3MR_CMD_RESET)
2277 goto clear_drv_cmd;
2278
2279 ioc_info(mrioc,
2280 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
2281 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
2282 drv_cmd->ioc_loginfo);
2283 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2284 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
2285 drv_cmd->retry_count++;
2286 ioc_info(mrioc,
2287 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x) retrying handshake retry=%d\n",
2288 __func__, drv_cmd->dev_handle,
2289 drv_cmd->retry_count);
2290 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
2291 drv_cmd, drv_cmd->iou_rc);
2292 return;
2293 }
2294 ioc_err(mrioc,
2295 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
2296 __func__, drv_cmd->dev_handle);
2297 } else {
2298 ioc_info(mrioc,
2299 "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
2300 __func__, drv_cmd->dev_handle);
2301 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
2302 }
2303
2304 if (!list_empty(&mrioc->delayed_rmhs_list)) {
2305 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
2306 struct delayed_dev_rmhs_node, list);
2307 drv_cmd->dev_handle = delayed_dev_rmhs->handle;
2308 drv_cmd->retry_count = 0;
2309 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
2310 ioc_info(mrioc,
2311 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
2312 __func__, drv_cmd->dev_handle);
2313 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
2314 drv_cmd->iou_rc);
2315 list_del(&delayed_dev_rmhs->list);
2316 kfree(delayed_dev_rmhs);
2317 return;
2318 }
2319
2320 clear_drv_cmd:
2321 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2322 drv_cmd->callback = NULL;
2323 drv_cmd->retry_count = 0;
2324 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2325 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2326 }
2327
2328 /**
2329 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
2330 * @mrioc: Adapter instance reference
2331 * @drv_cmd: Internal command tracker
2332 *
2333 * Issues a target reset TM to the firmware from the device
2334 * removal TM pend list or issues an IO unit control request as
2335 * part of device removal or hidden acknowledgment handshake.
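 * This is the second step of the removal handshake: the target reset
 * TM completion is followed by an IO unit control request that carries
 * the reason code saved in drv_cmd->iou_rc.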
2336 *
2337 * Return: Nothing
2338 */
2339 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
2340 struct mpi3mr_drv_cmd *drv_cmd)
2341 {
2342 struct mpi3_iounit_control_request iou_ctrl;
2343 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2344 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
2345 int retval;
2346
2347 if (drv_cmd->state & MPI3MR_CMD_RESET)
2348 goto clear_drv_cmd;
2349
2350 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
2351 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
2352
2353 if (tm_reply)
2354 pr_info(IOCNAME
2355 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
2356 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
2357 drv_cmd->ioc_loginfo,
2358 le32_to_cpu(tm_reply->termination_count));
2359
2360 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
2361 mrioc->name, drv_cmd->dev_handle, cmd_idx);
2362
2363 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2364
2365 drv_cmd->state = MPI3MR_CMD_PENDING;
2366 drv_cmd->is_waiting = 0;
2367 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
2368 iou_ctrl.operation = drv_cmd->iou_rc;
2369 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
2370 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
2371 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2372
2373 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
2374 1);
2375 if (retval) {
2376 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
2377 mrioc->name);
2378 goto clear_drv_cmd;
2379 }
2380
2381 return;
2382 clear_drv_cmd:
2383 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2384 drv_cmd->callback = NULL;
2385 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2386 drv_cmd->retry_count = 0;
2387 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2388 }
2389
2390 /**
2391 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
2392 * @mrioc: Adapter instance reference
2393 * @handle: Device handle
2394 * @cmdparam: Internal command tracker
2395 * @iou_rc: IO unit reason code
2396 *
2397 * Issues a target reset TM to the firmware or adds it to a pend
2398 * list as part of device removal or hidden acknowledgment
2399 * handshake.
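 * Only MPI3MR_NUM_DEVRMCMD handshakes can be in flight at a time; when
 * no command slot is free, the request is queued on delayed_rmhs_list
 * and replayed from mpi3mr_dev_rmhs_complete_iou().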
2400 *
2401 * Return: Nothing
2402 */
2403 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
2404 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
2405 {
2406 struct mpi3_scsi_task_mgmt_request tm_req;
2407 int retval = 0;
2408 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2409 u8 retrycount = 5;
2410 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2411 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2412 struct mpi3mr_tgt_dev *tgtdev = NULL;
2413 unsigned long flags;
2414
2415 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2416 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2417 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
2418 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
2419 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2420
2421 if (drv_cmd)
2422 goto issue_cmd;
2423 do {
2424 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
2425 MPI3MR_NUM_DEVRMCMD);
2426 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
2427 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
2428 break;
2429 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2430 }
2431 } while (retrycount--);
2432
2433 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
2434 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
2435 GFP_ATOMIC);
2436 if (!delayed_dev_rmhs)
2437 return;
2438 INIT_LIST_HEAD(&delayed_dev_rmhs->list);
2439 delayed_dev_rmhs->handle = handle;
2440 delayed_dev_rmhs->iou_rc = iou_rc;
2441 list_add_tail(&delayed_dev_rmhs->list,
2442 &mrioc->delayed_rmhs_list);
2443 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
2444 __func__, handle);
2445 return;
2446 }
2447 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
2448
2449 issue_cmd:
2450 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2451 ioc_info(mrioc,
2452 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
2453 __func__, handle, cmd_idx);
2454
2455 memset(&tm_req, 0, sizeof(tm_req));
2456 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2457 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
2458 goto out;
2459 }
2460 drv_cmd->state = MPI3MR_CMD_PENDING;
2461 drv_cmd->is_waiting = 0;
2462 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
2463 drv_cmd->dev_handle = handle;
2464 drv_cmd->iou_rc = iou_rc;
2465 tm_req.dev_handle = cpu_to_le16(handle);
2466 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2467 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2468 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
2469 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
2470
2471 set_bit(handle, mrioc->removepend_bitmap);
2472 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
2473 if (retval) {
2474 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
2475 __func__);
2476 goto out_failed;
2477 }
2478 out:
2479 return;
2480 out_failed:
2481 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2482 drv_cmd->callback = NULL;
2483 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2484 drv_cmd->retry_count = 0;
2485 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2486 }
2487
2488 /**
2489 * mpi3mr_complete_evt_ack - event ack request completion
2490 * @mrioc: Adapter instance reference
2491 * @drv_cmd: Internal command tracker
2492 *
2493 * This is the completion handler for the non-blocking event
2494 * acknowledgment sent to the firmware, and it issues any
2495 * pending event acknowledgment request.
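 * Pending acknowledgments are drained one at a time from
 * delayed_evtack_cmds_list, reusing the just-completed command slot.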
2496 *
2497 * Return: Nothing
2498 */
2499 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
2500 struct mpi3mr_drv_cmd *drv_cmd)
2501 {
2502 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2503 struct delayed_evt_ack_node *delayed_evtack = NULL;
2504
2505 if (drv_cmd->state & MPI3MR_CMD_RESET)
2506 goto clear_drv_cmd;
2507
2508 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2509 dprint_event_th(mrioc,
2510 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
2511 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2512 drv_cmd->ioc_loginfo);
2513 }
2514
2515 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2516 delayed_evtack =
2517 list_entry(mrioc->delayed_evtack_cmds_list.next,
2518 struct delayed_evt_ack_node, list);
2519 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
2520 delayed_evtack->event_ctx);
2521 list_del(&delayed_evtack->list);
2522 kfree(delayed_evtack);
2523 return;
2524 }
2525 clear_drv_cmd:
2526 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2527 drv_cmd->callback = NULL;
2528 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2529 }
2530
2531 /**
2532 * mpi3mr_send_event_ack - Issue event acknowledgment request
2533 * @mrioc: Adapter instance reference
2534 * @event: MPI3 event id
2535 * @cmdparam: Internal command tracker
2536 * @event_ctx: event context
2537 *
2538 * Issues an event acknowledgment request to the firmware if there
2539 * is a free command to send the event ack, else adds it to a pend
2540 * list so that it will be processed on completion of a prior
2541 * event acknowledgment.
2542 *
2543 * Return: Nothing
2544 */
2545 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2546 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
2547 {
2548 struct mpi3_event_ack_request evtack_req;
2549 int retval = 0;
2550 u8 retrycount = 5;
2551 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2552 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2553 struct delayed_evt_ack_node *delayed_evtack = NULL;
2554
2555 if (drv_cmd) {
2556 dprint_event_th(mrioc,
2557 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2558 event, event_ctx);
2559 goto issue_cmd;
2560 }
2561 dprint_event_th(mrioc,
2562 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2563 event, event_ctx);
2564 do {
2565 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
2566 MPI3MR_NUM_EVTACKCMD);
2567 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
2568 if (!test_and_set_bit(cmd_idx,
2569 mrioc->evtack_cmds_bitmap))
2570 break;
2571 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2572 }
2573 } while (retrycount--);
2574
2575 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
2576 delayed_evtack = kzalloc(sizeof(*delayed_evtack),
2577 GFP_ATOMIC);
2578 if (!delayed_evtack)
2579 return;
2580 INIT_LIST_HEAD(&delayed_evtack->list);
2581 delayed_evtack->event = event;
2582 delayed_evtack->event_ctx = event_ctx;
2583 list_add_tail(&delayed_evtack->list,
2584 &mrioc->delayed_evtack_cmds_list);
2585 dprint_event_th(mrioc,
2586 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
2587 event, event_ctx);
2588 return;
2589 }
2590 drv_cmd = &mrioc->evtack_cmds[cmd_idx];
2591
2592 issue_cmd:
2593 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2594
2595 memset(&evtack_req, 0, sizeof(evtack_req));
2596 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2597 dprint_event_th(mrioc,
2598 "sending event ack failed due to command in use\n");
2599 goto out;
2600 }
2601 drv_cmd->state = MPI3MR_CMD_PENDING;
2602
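	/* non-blocking ack: do not wait, completion is handled via the callback */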
drv_cmd->is_waiting = 0;
2603 drv_cmd->callback = mpi3mr_complete_evt_ack;
2604 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2605 evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
2606 evtack_req.event = event;
2607 evtack_req.event_context = cpu_to_le32(event_ctx);
2608 retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
2609 sizeof(evtack_req), 1);
2610 if (retval) {
2611 dprint_event_th(mrioc,
2612 "posting event ack request failed\n");
2613 goto out_failed;
2614 }
2615
2616 dprint_event_th(mrioc,
2617 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
2618 event, event_ctx);
2619 out:
2620 return;
2621 out_failed:
2622 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2623 drv_cmd->callback = NULL;
2624 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2625 }
2626
2627 /**
2628 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
2629 * @mrioc: Adapter instance reference
2630 * @event_reply: event data
2631 *
2632 * Checks for the reason code and based on that either blocks I/O
2633 * to the device, or unblocks I/O to the device, or starts the device
2634 * removal handshake with reason as remove with the firmware for
2635 * PCIe devices.
2636 *
2637 * Return: Nothing
2638 */
2639 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
2640 struct mpi3_event_notification_reply *event_reply)
2641 {
2642 struct mpi3_event_data_pcie_topology_change_list *topo_evt =
2643 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
2644 int i;
2645 u16 handle;
2646 u8 reason_code;
2647 struct mpi3mr_tgt_dev *tgtdev = NULL;
2648 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2649
2650 for (i = 0; i < topo_evt->num_entries; i++) {
2651 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
2652 if (!handle)
2653 continue;
2654 reason_code = topo_evt->port_entry[i].port_status;
2655 scsi_tgt_priv_data = NULL;
2656 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2657 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2658 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2659 tgtdev->starget->hostdata;
2660 switch (reason_code) {
2661 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
2662 if (scsi_tgt_priv_data) {
2663 scsi_tgt_priv_data->dev_removed = 1;
2664 scsi_tgt_priv_data->dev_removedelay = 0;
2665 atomic_set(&scsi_tgt_priv_data->block_io, 0);
2666 }
2667 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2668 MPI3_CTRL_OP_REMOVE_DEVICE);
2669 break;
2670 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
2671 if (scsi_tgt_priv_data) {
2672 scsi_tgt_priv_data->dev_removedelay = 1;
2673 atomic_inc(&scsi_tgt_priv_data->block_io);
2674 }
2675 break;
2676 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
2677 if (scsi_tgt_priv_data &&
2678 scsi_tgt_priv_data->dev_removedelay) {
2679 scsi_tgt_priv_data->dev_removedelay = 0;
2680 atomic_dec_if_positive
2681 (&scsi_tgt_priv_data->block_io);
2682 }
2683 break;
2684 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
2685 default:
2686 break;
2687 }
2688 if (tgtdev)
2689 mpi3mr_tgtdev_put(tgtdev);
2690 }
2691 }
2692
2693 /**
2694 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
2695 * @mrioc: Adapter instance reference
2696 * @event_reply: event data
2697 *
2698 * Checks for the reason code and based on that either blocks I/O
2699 * to the device, or unblocks I/O to the device, or starts the device
2700 * removal handshake with reason as remove with the firmware for
2701 * SAS/SATA devices.
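 * The per-target block_io counter is raised for a "delay not
 * responding" phy and dropped (never below zero) once the phy reports
 * responding again.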
2702 *
2703 * Return: Nothing
2704 */
2705 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
2706 struct mpi3_event_notification_reply *event_reply)
2707 {
2708 struct mpi3_event_data_sas_topology_change_list *topo_evt =
2709 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
2710 int i;
2711 u16 handle;
2712 u8 reason_code;
2713 struct mpi3mr_tgt_dev *tgtdev = NULL;
2714 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2715
2716 for (i = 0; i < topo_evt->num_entries; i++) {
2717 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
2718 if (!handle)
2719 continue;
2720 reason_code = topo_evt->phy_entry[i].status &
2721 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
2722 scsi_tgt_priv_data = NULL;
2723 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2724 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2725 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2726 tgtdev->starget->hostdata;
2727 switch (reason_code) {
2728 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
2729 if (scsi_tgt_priv_data) {
2730 scsi_tgt_priv_data->dev_removed = 1;
2731 scsi_tgt_priv_data->dev_removedelay = 0;
2732 atomic_set(&scsi_tgt_priv_data->block_io, 0);
2733 }
2734 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2735 MPI3_CTRL_OP_REMOVE_DEVICE);
2736 break;
2737 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
2738 if (scsi_tgt_priv_data) {
2739 scsi_tgt_priv_data->dev_removedelay = 1;
2740 atomic_inc(&scsi_tgt_priv_data->block_io);
2741 }
2742 break;
2743 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
2744 if (scsi_tgt_priv_data &&
2745 scsi_tgt_priv_data->dev_removedelay) {
2746 scsi_tgt_priv_data->dev_removedelay = 0;
2747 atomic_dec_if_positive
2748 (&scsi_tgt_priv_data->block_io);
2749 }
2750 break;
2751 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
2752 default:
2753 break;
2754 }
2755 if (tgtdev)
2756 mpi3mr_tgtdev_put(tgtdev);
2757 }
2758 }
2759
2760 /**
2761 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
2762 * @mrioc: Adapter instance reference
2763 * @event_reply: event data
2764 *
2765 * Checks for the reason code and based on that either blocks I/O
2766 * to the device, or unblocks I/O to the device, or starts the device
2767 * removal handshake with reason as remove/hide acknowledgment
2768 * with the firmware.
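 * The reason code is first folded into the local block, ublock, hide,
 * delete and remove flags, and the corresponding side effects are then
 * applied in one pass below.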
2769 * 2770 * Return: Nothing 2771 */ 2772 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2773 struct mpi3_event_notification_reply *event_reply) 2774 { 2775 u16 dev_handle = 0; 2776 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2777 struct mpi3mr_tgt_dev *tgtdev = NULL; 2778 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2779 struct mpi3_event_data_device_status_change *evtdata = 2780 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2781 2782 if (mrioc->stop_drv_processing) 2783 goto out; 2784 2785 dev_handle = le16_to_cpu(evtdata->dev_handle); 2786 dprint_event_th(mrioc, 2787 "device status change event top half with rc(0x%02x) for handle(0x%04x)\n", 2788 evtdata->reason_code, dev_handle); 2789 2790 switch (evtdata->reason_code) { 2791 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2792 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2793 block = 1; 2794 break; 2795 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2796 delete = 1; 2797 hide = 1; 2798 break; 2799 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2800 delete = 1; 2801 remove = 1; 2802 break; 2803 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2804 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2805 ublock = 1; 2806 break; 2807 default: 2808 break; 2809 } 2810 2811 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2812 if (!tgtdev) { 2813 dprint_event_th(mrioc, 2814 "processing device status change event could not identify device for handle(0x%04x)\n", 2815 dev_handle); 2816 goto out; 2817 } 2818 if (hide) 2819 tgtdev->is_hidden = hide; 2820 if (tgtdev->starget && tgtdev->starget->hostdata) { 2821 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2822 tgtdev->starget->hostdata; 2823 if (block) 2824 atomic_inc(&scsi_tgt_priv_data->block_io); 2825 if (delete) 2826 scsi_tgt_priv_data->dev_removed = 1; 2827 if (ublock) 2828 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2829 } 2830 if (remove) 2831 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2832 MPI3_CTRL_OP_REMOVE_DEVICE); 2833 if (hide) 2834 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2835 MPI3_CTRL_OP_HIDDEN_ACK); 2836 2837 out: 2838 if (tgtdev) 2839 mpi3mr_tgtdev_put(tgtdev); 2840 } 2841 2842 /** 2843 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2844 * @mrioc: Adapter instance reference 2845 * @event_reply: event data 2846 * 2847 * Blocks and unblocks host level I/O based on the reason code 2848 * 2849 * Return: Nothing 2850 */ 2851 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2852 struct mpi3_event_notification_reply *event_reply) 2853 { 2854 struct mpi3_event_data_prepare_for_reset *evtdata = 2855 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2856 2857 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2858 dprint_event_th(mrioc, 2859 "prepare for reset event top half with rc=start\n"); 2860 if (mrioc->prepare_for_reset) 2861 return; 2862 mrioc->prepare_for_reset = 1; 2863 mrioc->prepare_for_reset_timeout_counter = 0; 2864 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2865 dprint_event_th(mrioc, 2866 "prepare for reset top half with rc=abort\n"); 2867 mrioc->prepare_for_reset = 0; 2868 mrioc->prepare_for_reset_timeout_counter = 0; 2869 } 2870 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2871 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2872 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, 2873 le32_to_cpu(event_reply->event_context)); 2874 } 2875 2876 
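/*
 * The *_evt_th top-half handlers above run from the event reply
 * processing path and therefore only update flags, counters and
 * bitmaps; anything that may block (device scans, SAS transport
 * updates, device removal) is deferred to the firmware event worker
 * via mpi3mr_os_handle_events() below.
 */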
/**
2877 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2878 * @mrioc: Adapter instance reference
2879 * @event_reply: event data
2880 *
2881 * Identifies the new shutdown timeout value and updates it.
2882 *
2883 * Return: Nothing
2884 */
2885 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2886 struct mpi3_event_notification_reply *event_reply)
2887 {
2888 struct mpi3_event_data_energy_pack_change *evtdata =
2889 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2890 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2891
2892 if (!shutdown_timeout) {
2893 dprint_event_th(mrioc,
2894 "%s :Invalid Shutdown Timeout received = %d\n",
2895 __func__, shutdown_timeout);
2896 return;
2897 }
2898
2899 dprint_event_th(mrioc,
2900 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2901 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2902 mrioc->facts.shutdown_timeout = shutdown_timeout;
2903 }
2904
2905 /**
2906 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2907 * @mrioc: Adapter instance reference
2908 * @event_reply: event data
2909 *
2910 * Displays cable management event details.
2911 *
2912 * Return: Nothing
2913 */
2914 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2915 struct mpi3_event_notification_reply *event_reply)
2916 {
2917 struct mpi3_event_data_cable_management *evtdata =
2918 (struct mpi3_event_data_cable_management *)event_reply->event_data;
2919
2920 switch (evtdata->status) {
2921 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2922 {
2923 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2924 "Devices connected to this cable are not detected.\n"
2925 "This cable requires %d mW of power.\n",
2926 evtdata->receptacle_id,
2927 le32_to_cpu(evtdata->active_cable_power_requirement));
2928 break;
2929 }
2930 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2931 {
2932 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2933 evtdata->receptacle_id);
2934 break;
2935 }
2936 default:
2937 break;
2938 }
2939 }
2940
2941 /**
2942 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
2943 * @mrioc: Adapter instance reference
2944 *
2945 * Adds a driver specific event to make sure that the driver won't process the
2946 * events until all the devices are refreshed during soft reset.
2947 *
2948 * Return: Nothing
2949 */
2950 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
2951 {
2952 struct mpi3mr_fwevt *fwevt = NULL;
2953
2954 fwevt = mpi3mr_alloc_fwevt(0);
2955 if (!fwevt) {
2956 dprint_event_th(mrioc,
2957 "failed to schedule bottom half handler for event(0x%02x)\n",
2958 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
2959 return;
2960 }
2961 fwevt->mrioc = mrioc;
2962 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
2963 fwevt->send_ack = 0;
2964 fwevt->process_evt = 1;
2965 fwevt->evt_ctx = 0;
2966 fwevt->event_data_size = 0;
2967 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2968 }
2969
2970 /**
2971 * mpi3mr_os_handle_events - Firmware event handler
2972 * @mrioc: Adapter instance reference
2973 * @event_reply: event data
2974 *
2975 * Identifies whether the event has to be handled and acknowledged,
2976 * processes the event in the top-half, and/or schedules a
2977 * bottom-half through mpi3mr_fwevt_worker().
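 * Whether an acknowledgment is owed is derived from the reply's
 * msg_flags; events that need neither a bottom half nor an ack are
 * dropped without allocating a firmware event.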
2978 *
2979 * Return: Nothing
2980 */
2981 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
2982 struct mpi3_event_notification_reply *event_reply)
2983 {
2984 u16 evt_type, sz;
2985 struct mpi3mr_fwevt *fwevt = NULL;
2986 bool ack_req = 0, process_evt_bh = 0;
2987
2988 if (mrioc->stop_drv_processing)
2989 return;
2990
2991 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2992 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2993 ack_req = 1;
2994
2995 evt_type = event_reply->event;
2996 mpi3mr_event_trigger(mrioc, event_reply->event);
2997
2998 switch (evt_type) {
2999 case MPI3_EVENT_DEVICE_ADDED:
3000 {
3001 struct mpi3_device_page0 *dev_pg0 =
3002 (struct mpi3_device_page0 *)event_reply->event_data;
3003 if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
3004 dprint_event_th(mrioc,
3005 "failed to process device added event for handle(0x%04x),"
3006 " perst_id(%d) in the event top half handler\n",
3007 le16_to_cpu(dev_pg0->dev_handle),
3008 le16_to_cpu(dev_pg0->persistent_id));
3009 else
3010 process_evt_bh = 1;
3011 break;
3012 }
3013 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
3014 {
3015 process_evt_bh = 1;
3016 mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
3017 break;
3018 }
3019 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
3020 {
3021 process_evt_bh = 1;
3022 mpi3mr_sastopochg_evt_th(mrioc, event_reply);
3023 break;
3024 }
3025 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
3026 {
3027 process_evt_bh = 1;
3028 mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
3029 break;
3030 }
3031 case MPI3_EVENT_PREPARE_FOR_RESET:
3032 {
3033 mpi3mr_preparereset_evt_th(mrioc, event_reply);
3034 ack_req = 0;
3035 break;
3036 }
3037 case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
3038 {
3039 mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply);
3040 break;
3041 }
3042 case MPI3_EVENT_DEVICE_INFO_CHANGED:
3043 case MPI3_EVENT_LOG_DATA:
3044 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
3045 case MPI3_EVENT_ENCL_DEVICE_ADDED:
3046 {
3047 process_evt_bh = 1;
3048 break;
3049 }
3050 case MPI3_EVENT_ENERGY_PACK_CHANGE:
3051 {
3052 mpi3mr_energypackchg_evt_th(mrioc, event_reply);
3053 break;
3054 }
3055 case MPI3_EVENT_CABLE_MGMT:
3056 {
3057 mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
3058 break;
3059 }
3060 case MPI3_EVENT_SAS_DISCOVERY:
3061 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
3062 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
3063 case MPI3_EVENT_PCIE_ENUMERATION:
3064 break;
3065 default:
3066 ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
3067 __func__, evt_type);
3068 break;
3069 }
3070 if (process_evt_bh || ack_req) {
3071 dprint_event_th(mrioc,
3072 "scheduling bottom half handler for event(0x%02x), ack_required=%d\n",
3073 evt_type, ack_req);
3074 sz = event_reply->event_data_length * 4;
3075 fwevt = mpi3mr_alloc_fwevt(sz);
3076 if (!fwevt) {
3077 dprint_event_th(mrioc,
3078 "failed to schedule bottom half handler for"
3079 " event(0x%02x), ack_required=%d\n", evt_type, ack_req);
3080 return;
3081 }
3082
3083 memcpy(fwevt->event_data, event_reply->event_data, sz);
3084 fwevt->mrioc = mrioc;
3085 fwevt->event_id = evt_type;
3086 fwevt->send_ack = ack_req;
3087 fwevt->process_evt = process_evt_bh;
3088 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
3089 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
3090 }
3091 }
3092
3093 /**
3094 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
3095 * @mrioc: Adapter instance reference
3096 * @scmd: SCSI command reference
3097 * @scsiio_req: MPI3 SCSI IO request
3098 *
3099 * Identifies the protection information flags from the SCSI
3100 *
command and sets appropriate flags in the MPI3 SCSI IO
3101 * request.
3102 *
3103 * Return: Nothing
3104 */
3105 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
3106 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3107 {
3108 u16 eedp_flags = 0;
3109 unsigned char prot_op = scsi_get_prot_op(scmd);
3110
3111 switch (prot_op) {
3112 case SCSI_PROT_NORMAL:
3113 return;
3114 case SCSI_PROT_READ_STRIP:
3115 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3116 break;
3117 case SCSI_PROT_WRITE_INSERT:
3118 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3119 break;
3120 case SCSI_PROT_READ_INSERT:
3121 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3122 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3123 break;
3124 case SCSI_PROT_WRITE_STRIP:
3125 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3126 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3127 break;
3128 case SCSI_PROT_READ_PASS:
3129 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3130 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3131 break;
3132 case SCSI_PROT_WRITE_PASS:
3133 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
3134 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
3135 scsiio_req->sgl[0].eedp.application_tag_translation_mask =
3136 0xffff;
3137 } else
3138 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3139
3140 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3141 break;
3142 default:
3143 return;
3144 }
3145
3146 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
3147 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
3148
3149 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
3150 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
3151
3152 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
3153 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
3154 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3155 scsiio_req->cdb.eedp32.primary_reference_tag =
3156 cpu_to_be32(scsi_prot_ref_tag(scmd));
3157 }
3158
3159 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
3160 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3161
3162 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
3163
3164 switch (scsi_prot_interval(scmd)) {
3165 case 512:
3166 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
3167 break;
3168 case 520:
3169 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
3170 break;
3171 case 4080:
3172 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
3173 break;
3174 case 4088:
3175 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
3176 break;
3177 case 4096:
3178 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
3179 break;
3180 case 4104:
3181 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
3182 break;
3183 case 4160:
3184 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
3185 break;
3186 default:
3187 break;
3188 }
3189
3190 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
3191 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
3192 }
3193
3194 /**
3195 * mpi3mr_build_sense_buffer - Map sense information
3196 * @desc: Sense type
3197 * @buf: Sense buffer to populate
3198 * @key: Sense key
3199 * @asc: Additional sense code
3200 * @ascq: Additional sense code qualifier
3201 *
3202 * Maps the given sense information into either descriptor or
3203 * fixed format sense data.
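 * Descriptor format (response code 0x72) carries key/asc/ascq in
 * bytes 1-3; fixed format (response code 0x70) carries the key in
 * byte 2, asc/ascq in bytes 12-13 and an additional sense length of
 * 0xa in byte 7.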
3204 *
3205 * Return: Nothing
3206 */
3207 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
3208 u8 asc, u8 ascq)
3209 {
3210 if (desc) {
3211 buf[0] = 0x72; /* descriptor, current */
3212 buf[1] = key;
3213 buf[2] = asc;
3214 buf[3] = ascq;
3215 buf[7] = 0;
3216 } else {
3217 buf[0] = 0x70; /* fixed, current */
3218 buf[2] = key;
3219 buf[7] = 0xa;
3220 buf[12] = asc;
3221 buf[13] = ascq;
3222 }
3223 }
3224
3225 /**
3226 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
3227 * @scmd: SCSI command reference
3228 * @ioc_status: status of MPI3 request
3229 *
3230 * Maps the EEDP error status of the SCSI IO request to sense
3231 * data.
3232 *
3233 * Return: Nothing
3234 */
3235 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
3236 u16 ioc_status)
3237 {
3238 u8 ascq = 0;
3239
3240 switch (ioc_status) {
3241 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3242 ascq = 0x01;
3243 break;
3244 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3245 ascq = 0x02;
3246 break;
3247 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3248 ascq = 0x03;
3249 break;
3250 default:
3251 ascq = 0x00;
3252 break;
3253 }
3254
3255 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
3256 0x10, ascq);
3257 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
3258 }
3259
3260 /**
3261 * mpi3mr_process_op_reply_desc - reply descriptor handler
3262 * @mrioc: Adapter instance reference
3263 * @reply_desc: Operational reply descriptor
3264 * @reply_dma: placeholder for the reply DMA address
3265 * @qidx: Operational queue index
3266 *
3267 * Processes the operational reply descriptor and identifies the
3268 * descriptor type. Based on the descriptor, maps the MPI3 request
3269 * status to a SCSI command status and calls the scsi_done
3270 * callback.
3271 *
3272 * Return: Nothing
3273 */
3274 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
3275 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
3276 {
3277 u16 reply_desc_type, host_tag = 0;
3278 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3279 u32 ioc_loginfo = 0;
3280 struct mpi3_status_reply_descriptor *status_desc = NULL;
3281 struct mpi3_address_reply_descriptor *addr_desc = NULL;
3282 struct mpi3_success_reply_descriptor *success_desc = NULL;
3283 struct mpi3_scsi_io_reply *scsi_reply = NULL;
3284 struct scsi_cmnd *scmd = NULL;
3285 struct scmd_priv *priv = NULL;
3286 u8 *sense_buf = NULL;
3287 u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
3288 u32 xfer_count = 0, sense_count = 0, resp_data = 0;
3289 u16 dev_handle = 0xFFFF;
3290 struct scsi_sense_hdr sshdr;
3291 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
3292 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3293 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
3294 struct mpi3mr_throttle_group_info *tg = NULL;
3295 u8 throttle_enabled_dev = 0;
3296
3297 *reply_dma = 0;
3298 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
3299 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
3300 switch (reply_desc_type) {
3301 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
3302 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
3303 host_tag = le16_to_cpu(status_desc->host_tag);
3304 ioc_status = le16_to_cpu(status_desc->ioc_status);
3305 if (ioc_status &
3306 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3307 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
3308 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3309 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3310 break;
3311 case
MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3312 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 3313 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3314 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3315 *reply_dma); 3316 if (!scsi_reply) { 3317 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3318 mrioc->name); 3319 goto out; 3320 } 3321 host_tag = le16_to_cpu(scsi_reply->host_tag); 3322 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3323 scsi_status = scsi_reply->scsi_status; 3324 scsi_state = scsi_reply->scsi_state; 3325 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3326 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3327 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3328 sense_count = le32_to_cpu(scsi_reply->sense_count); 3329 resp_data = le32_to_cpu(scsi_reply->response_data); 3330 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3331 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3332 if (ioc_status & 3333 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3334 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3335 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3336 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3337 panic("%s: Ran out of sense buffers\n", mrioc->name); 3338 if (sense_buf) { 3339 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3340 mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, 3341 sshdr.asc, sshdr.ascq); 3342 } 3343 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3344 break; 3345 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3346 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3347 host_tag = le16_to_cpu(success_desc->host_tag); 3348 break; 3349 default: 3350 break; 3351 } 3352 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3353 if (!scmd) { 3354 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3355 mrioc->name, host_tag); 3356 goto out; 3357 } 3358 priv = scsi_cmd_priv(scmd); 3359 3360 data_len_blks = scsi_bufflen(scmd) >> 9; 3361 sdev_priv_data = scmd->device->hostdata; 3362 if (sdev_priv_data) { 3363 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3364 if (stgt_priv_data) { 3365 tg = stgt_priv_data->throttle_group; 3366 throttle_enabled_dev = 3367 stgt_priv_data->io_throttle_enabled; 3368 dev_handle = stgt_priv_data->dev_handle; 3369 } 3370 } 3371 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3372 throttle_enabled_dev)) { 3373 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3374 &mrioc->pend_large_data_sz); 3375 if (tg) { 3376 tg_pend_data_len = atomic_sub_return(data_len_blks, 3377 &tg->pend_large_data_sz); 3378 if (tg->io_divert && ((ioc_pend_data_len <= 3379 mrioc->io_throttle_low) && 3380 (tg_pend_data_len <= tg->low))) { 3381 tg->io_divert = 0; 3382 mpi3mr_set_io_divert_for_all_vd_in_tg( 3383 mrioc, tg, 0); 3384 } 3385 } else { 3386 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3387 stgt_priv_data->io_divert = 0; 3388 } 3389 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3390 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3391 if (!tg) { 3392 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3393 stgt_priv_data->io_divert = 0; 3394 3395 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3396 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3397 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3398 tg->io_divert = 0; 3399 mpi3mr_set_io_divert_for_all_vd_in_tg( 3400 mrioc, tg, 0); 3401 } 3402 } 3403 } 3404 3405 if (success_desc) { 
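		/* success descriptor: clean completion, no reply frame status to decode */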
3406 scmd->result = DID_OK << 16; 3407 goto out_success; 3408 } 3409 3410 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3411 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3412 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3413 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3414 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 3415 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3416 3417 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3418 sense_buf) { 3419 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3420 3421 memcpy(scmd->sense_buffer, sense_buf, sz); 3422 } 3423 3424 switch (ioc_status) { 3425 case MPI3_IOCSTATUS_BUSY: 3426 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3427 scmd->result = SAM_STAT_BUSY; 3428 break; 3429 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3430 scmd->result = DID_NO_CONNECT << 16; 3431 break; 3432 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3433 scmd->result = DID_SOFT_ERROR << 16; 3434 break; 3435 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3436 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3437 scmd->result = DID_RESET << 16; 3438 break; 3439 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3440 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3441 scmd->result = DID_SOFT_ERROR << 16; 3442 else 3443 scmd->result = (DID_OK << 16) | scsi_status; 3444 break; 3445 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3446 scmd->result = (DID_OK << 16) | scsi_status; 3447 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3448 break; 3449 if (xfer_count < scmd->underflow) { 3450 if (scsi_status == SAM_STAT_BUSY) 3451 scmd->result = SAM_STAT_BUSY; 3452 else 3453 scmd->result = DID_SOFT_ERROR << 16; 3454 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3455 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3456 scmd->result = DID_SOFT_ERROR << 16; 3457 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3458 scmd->result = DID_RESET << 16; 3459 break; 3460 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3461 scsi_set_resid(scmd, 0); 3462 fallthrough; 3463 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3464 case MPI3_IOCSTATUS_SUCCESS: 3465 scmd->result = (DID_OK << 16) | scsi_status; 3466 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3467 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3468 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3469 scmd->result = DID_SOFT_ERROR << 16; 3470 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3471 scmd->result = DID_RESET << 16; 3472 break; 3473 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3474 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3475 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3476 mpi3mr_map_eedp_error(scmd, ioc_status); 3477 break; 3478 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3479 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3480 case MPI3_IOCSTATUS_INVALID_SGL: 3481 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3482 case MPI3_IOCSTATUS_INVALID_FIELD: 3483 case MPI3_IOCSTATUS_INVALID_STATE: 3484 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3485 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3486 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3487 default: 3488 scmd->result = DID_SOFT_ERROR << 16; 3489 break; 3490 } 3491 3492 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3493 (scmd->cmnd[0] != ATA_16) && 3494 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3495 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3496 scmd->result); 3497 scsi_print_command(scmd); 3498 ioc_info(mrioc, 3499 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3500 __func__, 
dev_handle, ioc_status, ioc_loginfo,
3501 priv->req_q_idx + 1);
3502 ioc_info(mrioc,
3503 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
3504 host_tag, scsi_state, scsi_status, xfer_count, resp_data);
3505 if (sense_buf) {
3506 scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3507 ioc_info(mrioc,
3508 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
3509 __func__, sense_count, sshdr.sense_key,
3510 sshdr.asc, sshdr.ascq);
3511 }
3512 }
3513 out_success:
3514 if (priv->meta_sg_valid) {
3515 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
3516 scsi_prot_sg_count(scmd), scmd->sc_data_direction);
3517 }
3518 mpi3mr_clear_scmd_priv(mrioc, scmd);
3519 scsi_dma_unmap(scmd);
3520 scsi_done(scmd);
3521 out:
3522 if (sense_buf)
3523 mpi3mr_repost_sense_buf(mrioc,
3524 le64_to_cpu(scsi_reply->sense_data_buffer_address));
3525 }
3526
3527 /**
3528 * mpi3mr_get_chain_idx - get free chain buffer index
3529 * @mrioc: Adapter instance reference
3530 *
3531 * Try to get a free chain buffer index from the free pool.
3532 *
3533 * Return: -1 on failure or the free chain buffer index
3534 */
3535 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
3536 {
3537 u8 retry_count = 5;
3538 int cmd_idx = -1;
3539 unsigned long flags;
3540
3541 spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
3542 do {
3543 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
3544 mrioc->chain_buf_count);
3545 if (cmd_idx < mrioc->chain_buf_count) {
3546 set_bit(cmd_idx, mrioc->chain_bitmap);
3547 break;
3548 }
3549 cmd_idx = -1;
3550 } while (retry_count--);
3551 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
3552 return cmd_idx;
3553 }
3554
3555 /**
3556 * mpi3mr_prepare_sg_scmd - build scatter gather list
3557 * @mrioc: Adapter instance reference
3558 * @scmd: SCSI command reference
3559 * @scsiio_req: MPI3 SCSI IO request
3560 *
3561 * This function maps the SCSI command's data and protection SGEs to
3562 * MPI request SGEs. If required, an additional 4K chain buffer is
3563 * used to send the SGEs.
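 * When the SGEs do not fit in the request frame, all but the last
 * in-frame slot are filled, a LAST_CHAIN SGE is written to that slot,
 * and the remainder of the list continues in a chain buffer obtained
 * via mpi3mr_get_chain_idx().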
3564 * 3565 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3566 */ 3567 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3568 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3569 { 3570 dma_addr_t chain_dma; 3571 struct scatterlist *sg_scmd; 3572 void *sg_local, *chain; 3573 u32 chain_length; 3574 int sges_left, chain_idx; 3575 u32 sges_in_segment; 3576 u8 simple_sgl_flags; 3577 u8 simple_sgl_flags_last; 3578 u8 last_chain_sgl_flags; 3579 struct chain_element *chain_req; 3580 struct scmd_priv *priv = NULL; 3581 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3582 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3583 3584 priv = scsi_cmd_priv(scmd); 3585 3586 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3587 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3588 simple_sgl_flags_last = simple_sgl_flags | 3589 MPI3_SGE_FLAGS_END_OF_LIST; 3590 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3591 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3592 3593 if (meta_sg) 3594 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3595 else 3596 sg_local = &scsiio_req->sgl; 3597 3598 if (!scsiio_req->data_length && !meta_sg) { 3599 mpi3mr_build_zero_len_sge(sg_local); 3600 return 0; 3601 } 3602 3603 if (meta_sg) { 3604 sg_scmd = scsi_prot_sglist(scmd); 3605 sges_left = dma_map_sg(&mrioc->pdev->dev, 3606 scsi_prot_sglist(scmd), 3607 scsi_prot_sg_count(scmd), 3608 scmd->sc_data_direction); 3609 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3610 } else { 3611 /* 3612 * Some firmware versions byte-swap the REPORT ZONES command 3613 * reply from ATA-ZAC devices by directly accessing in the host 3614 * buffer. This does not respect the default command DMA 3615 * direction and causes IOMMU page faults on some architectures 3616 * with an IOMMU enforcing write mappings (e.g. AMD hosts). 3617 * Avoid such issue by making the REPORT ZONES buffer mapping 3618 * bi-directional. 
3619 */ 3620 if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) 3621 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 3622 sg_scmd = scsi_sglist(scmd); 3623 sges_left = scsi_dma_map(scmd); 3624 } 3625 3626 if (sges_left < 0) { 3627 sdev_printk(KERN_ERR, scmd->device, 3628 "scsi_dma_map failed: request for %d bytes!\n", 3629 scsi_bufflen(scmd)); 3630 return -ENOMEM; 3631 } 3632 if (sges_left > mrioc->max_sgl_entries) { 3633 sdev_printk(KERN_ERR, scmd->device, 3634 "scsi_dma_map returned unsupported sge count %d!\n", 3635 sges_left); 3636 return -ENOMEM; 3637 } 3638 3639 sges_in_segment = (mrioc->facts.op_req_sz - 3640 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3641 3642 if (scsiio_req->sgl[0].eedp.flags == 3643 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3644 sg_local += sizeof(struct mpi3_sge_common); 3645 sges_in_segment--; 3646 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3647 } 3648 3649 if (scsiio_req->msg_flags == 3650 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3651 sges_in_segment--; 3652 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3653 } 3654 3655 if (meta_sg) 3656 sges_in_segment = 1; 3657 3658 if (sges_left <= sges_in_segment) 3659 goto fill_in_last_segment; 3660 3661 /* fill in main message segment when there is a chain following */ 3662 while (sges_in_segment > 1) { 3663 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3664 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3665 sg_scmd = sg_next(sg_scmd); 3666 sg_local += sizeof(struct mpi3_sge_common); 3667 sges_left--; 3668 sges_in_segment--; 3669 } 3670 3671 chain_idx = mpi3mr_get_chain_idx(mrioc); 3672 if (chain_idx < 0) 3673 return -1; 3674 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3675 if (meta_sg) 3676 priv->meta_chain_idx = chain_idx; 3677 else 3678 priv->chain_idx = chain_idx; 3679 3680 chain = chain_req->addr; 3681 chain_dma = chain_req->dma_addr; 3682 sges_in_segment = sges_left; 3683 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3684 3685 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3686 chain_length, chain_dma); 3687 3688 sg_local = chain; 3689 3690 fill_in_last_segment: 3691 while (sges_left > 0) { 3692 if (sges_left == 1) 3693 mpi3mr_add_sg_single(sg_local, 3694 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3695 sg_dma_address(sg_scmd)); 3696 else 3697 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3698 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3699 sg_scmd = sg_next(sg_scmd); 3700 sg_local += sizeof(struct mpi3_sge_common); 3701 sges_left--; 3702 } 3703 3704 return 0; 3705 } 3706 3707 /** 3708 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3709 * @mrioc: Adapter instance reference 3710 * @scmd: SCSI command reference 3711 * @scsiio_req: MPI3 SCSI IO request 3712 * 3713 * This function calls mpi3mr_prepare_sg_scmd for constructing 3714 * both data SGEs and protection information SGEs in the MPI 3715 * format from the SCSI Command as appropriate . 3716 * 3717 * Return: return value of mpi3mr_prepare_sg_scmd. 
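 *
 * A minimal sketch of the two-pass flow implemented below
 * (illustrative, mirrors the function body):
 *
 *	ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
 *	if (!ret && (scsiio_req->msg_flags ==
 *	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID)) {
 *		scsiio_req->flags |=
 *		    cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
 *		ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
 *	}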
3718 */ 3719 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3720 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3721 { 3722 int ret; 3723 3724 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3725 if (ret) 3726 return ret; 3727 3728 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3729 /* There is a valid meta sg */ 3730 scsiio_req->flags |= 3731 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3732 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3733 } 3734 3735 return ret; 3736 } 3737 3738 /** 3739 * mpi3mr_tm_response_name - get TM response as a string 3740 * @resp_code: TM response code 3741 * 3742 * Convert known task management response code as a readable 3743 * string. 3744 * 3745 * Return: response code string. 3746 */ 3747 static const char *mpi3mr_tm_response_name(u8 resp_code) 3748 { 3749 char *desc; 3750 3751 switch (resp_code) { 3752 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3753 desc = "task management request completed"; 3754 break; 3755 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 3756 desc = "invalid frame"; 3757 break; 3758 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 3759 desc = "task management request not supported"; 3760 break; 3761 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 3762 desc = "task management request failed"; 3763 break; 3764 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3765 desc = "task management request succeeded"; 3766 break; 3767 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 3768 desc = "invalid LUN"; 3769 break; 3770 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 3771 desc = "overlapped tag attempted"; 3772 break; 3773 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3774 desc = "task queued, however not sent to target"; 3775 break; 3776 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 3777 desc = "task management request denied by NVMe device"; 3778 break; 3779 default: 3780 desc = "unknown"; 3781 break; 3782 } 3783 3784 return desc; 3785 } 3786 3787 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 3788 { 3789 int i; 3790 int num_of_reply_queues = 3791 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 3792 3793 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 3794 mpi3mr_process_op_reply_q(mrioc, 3795 mrioc->intr_info[i].op_reply_q); 3796 } 3797 3798 /** 3799 * mpi3mr_issue_tm - Issue Task Management request 3800 * @mrioc: Adapter instance reference 3801 * @tm_type: Task Management type 3802 * @handle: Device handle 3803 * @lun: lun ID 3804 * @htag: Host tag of the TM request 3805 * @timeout: TM timeout value 3806 * @drv_cmd: Internal command tracker 3807 * @resp_code: Response code place holder 3808 * @scmd: SCSI command 3809 * 3810 * Issues a Task Management Request to the controller for a 3811 * specified target, lun and command and wait for its completion 3812 * and check TM response. Recover the TM if it timed out by 3813 * issuing controller reset. 
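 *
 * A minimal caller sketch (modeled on the error-handling callbacks
 * later in this file; not a complete example):
 *
 *	u8 resp_code = 0;
 *	int ret;
 *
 *	ret = mpi3mr_issue_tm(mrioc,
 *	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
 *	    lun, MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
 *	    &mrioc->host_tm_cmds, &resp_code, scmd);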
3814 * 3815 * Return: 0 on success, non-zero on errors 3816 */ 3817 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3818 u16 handle, uint lun, u16 htag, ulong timeout, 3819 struct mpi3mr_drv_cmd *drv_cmd, 3820 u8 *resp_code, struct scsi_cmnd *scmd) 3821 { 3822 struct mpi3_scsi_task_mgmt_request tm_req; 3823 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3824 int retval = 0; 3825 struct mpi3mr_tgt_dev *tgtdev = NULL; 3826 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3827 struct scmd_priv *cmd_priv = NULL; 3828 struct scsi_device *sdev = NULL; 3829 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3830 3831 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3832 __func__, tm_type, handle); 3833 if (mrioc->unrecoverable) { 3834 retval = -1; 3835 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3836 __func__); 3837 goto out; 3838 } 3839 3840 memset(&tm_req, 0, sizeof(tm_req)); 3841 mutex_lock(&drv_cmd->mutex); 3842 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3843 retval = -1; 3844 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3845 mutex_unlock(&drv_cmd->mutex); 3846 goto out; 3847 } 3848 if (mrioc->reset_in_progress) { 3849 retval = -1; 3850 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3851 mutex_unlock(&drv_cmd->mutex); 3852 goto out; 3853 } 3854 if (mrioc->block_on_pci_err) { 3855 retval = -1; 3856 dprint_tm(mrioc, "sending task management failed due to\n" 3857 "pci error recovery in progress\n"); 3858 mutex_unlock(&drv_cmd->mutex); 3859 goto out; 3860 } 3861 3862 drv_cmd->state = MPI3MR_CMD_PENDING; 3863 drv_cmd->is_waiting = 1; 3864 drv_cmd->callback = NULL; 3865 tm_req.dev_handle = cpu_to_le16(handle); 3866 tm_req.task_type = tm_type; 3867 tm_req.host_tag = cpu_to_le16(htag); 3868 3869 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3870 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3871 3872 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3873 3874 if (scmd) { 3875 if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 3876 cmd_priv = scsi_cmd_priv(scmd); 3877 if (!cmd_priv) 3878 goto out_unlock; 3879 3880 struct op_req_qinfo *op_req_q; 3881 3882 op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx]; 3883 tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag); 3884 tm_req.task_request_queue_id = 3885 cpu_to_le16(op_req_q->qid); 3886 } 3887 sdev = scmd->device; 3888 sdev_priv_data = sdev->hostdata; 3889 scsi_tgt_priv_data = ((sdev_priv_data) ? 
3890 sdev_priv_data->tgt_priv_data : NULL); 3891 } else { 3892 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3893 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3894 tgtdev->starget->hostdata; 3895 } 3896 3897 if (scsi_tgt_priv_data) 3898 atomic_inc(&scsi_tgt_priv_data->block_io); 3899 3900 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3901 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3902 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3903 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3904 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3905 } 3906 3907 init_completion(&drv_cmd->done); 3908 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3909 if (retval) { 3910 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3911 goto out_unlock; 3912 } 3913 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3914 3915 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3916 drv_cmd->is_waiting = 0; 3917 retval = -1; 3918 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3919 dprint_tm(mrioc, 3920 "task management request timed out after %ld seconds\n", 3921 timeout); 3922 if (mrioc->logging_level & MPI3_DEBUG_TM) 3923 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3924 mpi3mr_soft_reset_handler(mrioc, 3925 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3926 } 3927 goto out_unlock; 3928 } 3929 3930 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3931 dprint_tm(mrioc, "invalid task management reply message\n"); 3932 retval = -1; 3933 goto out_unlock; 3934 } 3935 3936 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3937 3938 switch (drv_cmd->ioc_status) { 3939 case MPI3_IOCSTATUS_SUCCESS: 3940 *resp_code = le32_to_cpu(tm_reply->response_data) & 3941 MPI3MR_RI_MASK_RESPCODE; 3942 break; 3943 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3944 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3945 break; 3946 default: 3947 dprint_tm(mrioc, 3948 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3949 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3950 retval = -1; 3951 goto out_unlock; 3952 } 3953 3954 switch (*resp_code) { 3955 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3956 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3957 break; 3958 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3959 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3960 retval = -1; 3961 break; 3962 default: 3963 retval = -1; 3964 break; 3965 } 3966 3967 dprint_tm(mrioc, 3968 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3969 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3970 le32_to_cpu(tm_reply->termination_count), 3971 mpi3mr_tm_response_name(*resp_code), *resp_code); 3972 3973 if (!retval) { 3974 mpi3mr_ioc_disable_intr(mrioc); 3975 mpi3mr_poll_pend_io_completions(mrioc); 3976 mpi3mr_ioc_enable_intr(mrioc); 3977 mpi3mr_poll_pend_io_completions(mrioc); 3978 mpi3mr_process_admin_reply_q(mrioc); 3979 } 3980 switch (tm_type) { 3981 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3982 if (!scsi_tgt_priv_data) 3983 break; 3984 scsi_tgt_priv_data->pend_count = 0; 3985 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3986 mpi3mr_count_tgt_pending, 3987 (void *)scsi_tgt_priv_data->starget); 3988 break; 3989 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3990 if (!sdev_priv_data) 3991 break; 3992 sdev_priv_data->pend_count = 0; 3993 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 
mpi3mr_count_dev_pending, (void *)sdev);
3995 break;
3996 default:
3997 break;
3998 }
3999 mpi3mr_global_trigger(mrioc,
4000 MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED);
4001
4002 out_unlock:
4003 drv_cmd->state = MPI3MR_CMD_NOTUSED;
4004 mutex_unlock(&drv_cmd->mutex);
4005 if (scsi_tgt_priv_data)
4006 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
4007 if (tgtdev)
4008 mpi3mr_tgtdev_put(tgtdev);
4009 out:
4010 return retval;
4011 }
4012
4013 /**
4014 * mpi3mr_bios_param - BIOS param callback
4015 * @sdev: SCSI device reference
4016 * @bdev: Block device reference
4017 * @capacity: Capacity in logical sectors
4018 * @params: Parameter array
4019 *
4020 * Set the BIOS parameters: heads, sectors and cylinders.
4021 *
4022 * Return: 0 always
4023 */
4024 static int mpi3mr_bios_param(struct scsi_device *sdev,
4025 struct block_device *bdev, sector_t capacity, int params[])
4026 {
4027 int heads;
4028 int sectors;
4029 sector_t cylinders;
4030 ulong dummy;
4031
4032 heads = 64;
4033 sectors = 32;
4034
4035 dummy = heads * sectors;
4036 cylinders = capacity;
4037 sector_div(cylinders, dummy);
4038
4039 if ((ulong)capacity >= 0x200000) {
4040 heads = 255;
4041 sectors = 63;
4042 dummy = heads * sectors;
4043 cylinders = capacity;
4044 sector_div(cylinders, dummy);
4045 }
4046
4047 params[0] = heads;
4048 params[1] = sectors;
4049 params[2] = cylinders;
4050 return 0;
4051 }
4052
4053 /**
4054 * mpi3mr_map_queues - Map queues callback handler
4055 * @shost: SCSI host reference
4056 *
4057 * Maps default and poll queues.
4058 *
4059 * Return: Nothing.
4060 */
4061 static void mpi3mr_map_queues(struct Scsi_Host *shost)
4062 {
4063 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4064 int i, qoff, offset;
4065 struct blk_mq_queue_map *map = NULL;
4066
4067 offset = mrioc->op_reply_q_offset;
4068
4069 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
4070 map = &shost->tag_set.map[i];
4071
4072 map->nr_queues = 0;
4073
4074 if (i == HCTX_TYPE_DEFAULT)
4075 map->nr_queues = mrioc->default_qcount;
4076 else if (i == HCTX_TYPE_POLL)
4077 map->nr_queues = mrioc->active_poll_qcount;
4078
4079 if (!map->nr_queues) {
4080 BUG_ON(i == HCTX_TYPE_DEFAULT);
4081 continue;
4082 }
4083
4084 /*
4085 * The poll queue(s) doesn't have an IRQ (and hence IRQ
4086 * affinity), so use the regular blk-mq cpu mapping
4087 */
4088 map->queue_offset = qoff;
4089 if (i != HCTX_TYPE_POLL)
4090 blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
4091 else
4092 blk_mq_map_queues(map);
4093
4094 qoff += map->nr_queues;
4095 offset += map->nr_queues;
4096 }
4097 }
4098
4099 /**
4100 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
4101 * @mrioc: Adapter instance reference
4102 *
4103 * Calculate the pending I/Os for the controller and return.
4104 *
4105 * Return: Number of pending I/Os
4106 */
4107 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
4108 {
4109 u16 i;
4110 uint pend_ios = 0;
4111
4112 for (i = 0; i < mrioc->num_op_reply_q; i++)
4113 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
4114 return pend_ios;
4115 }
4116
4117 /**
4118 * mpi3mr_print_pending_host_io - print pending I/Os
4119 * @mrioc: Adapter instance reference
4120 *
4121 * Print the number of pending I/Os and each I/O's details prior to
4122 * reset for debug purposes.
4123 *
4124 * Return: Nothing
4125 */
4126 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
4127 {
4128 struct Scsi_Host *shost = mrioc->shost;
4129
4130 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
4131 __func__, mpi3mr_get_fw_pending_ios(mrioc));
4132 blk_mq_tagset_busy_iter(&shost->tag_set,
4133 mpi3mr_print_scmd, (void *)mrioc);
4134 }
4135
4136 /**
4137 * mpi3mr_wait_for_host_io - block for I/Os to complete
4138 * @mrioc: Adapter instance reference
4139 * @timeout: timeout in seconds
 *
4140 * Waits for pending I/Os for the given adapter to complete or
4141 * to hit the timeout.
4142 *
4143 * Return: Nothing
4144 */
4145 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
4146 {
4147 enum mpi3mr_iocstate iocstate;
4148 int i = 0;
4149
4150 iocstate = mpi3mr_get_iocstate(mrioc);
4151 if (iocstate != MRIOC_STATE_READY)
4152 return;
4153
4154 if (!mpi3mr_get_fw_pending_ios(mrioc))
4155 return;
4156 ioc_info(mrioc,
4157 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
4158 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
4159
4160 for (i = 0; i < timeout; i++) {
4161 if (!mpi3mr_get_fw_pending_ios(mrioc))
4162 break;
4163 iocstate = mpi3mr_get_iocstate(mrioc);
4164 if (iocstate != MRIOC_STATE_READY)
4165 break;
4166 msleep(1000);
4167 }
4168
4169 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
4170 mpi3mr_get_fw_pending_ios(mrioc));
4171 }
4172
4173 /**
4174 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
4175 * @mrioc: Adapter instance reference
4176 * @scmd: SCSI command reference
4177 * @scsiio_req: MPI3 SCSI IO request
4178 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
4179 * @wslen: write same max length
4180 *
4181 * Reads the unmap, ndob and number-of-blocks fields from a write
4182 * same SCSI I/O and, based on these values, sets the divert I/O
4183 * flag and the reason for diverting the I/O to the firmware.
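 *
 * For example (the field offsets parsed below): for WRITE SAME(16)
 * the unmap and ndob bits are in CDB byte 1 (masks 0x08 and 0x01)
 * and the number of blocks is in CDB bytes 10..13 (big endian); the
 * I/O is diverted to the firmware only when both bits are set and
 * the block count exceeds @wslen.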
4184 * 4185 * Return: Nothing 4186 */ 4187 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc, 4188 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req, 4189 u32 *scsiio_flags, u16 wslen) 4190 { 4191 u8 unmap = 0, ndob = 0; 4192 u8 opcode = scmd->cmnd[0]; 4193 u32 num_blocks = 0; 4194 u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]); 4195 4196 if (opcode == WRITE_SAME_16) { 4197 unmap = scmd->cmnd[1] & 0x08; 4198 ndob = scmd->cmnd[1] & 0x01; 4199 num_blocks = get_unaligned_be32(scmd->cmnd + 10); 4200 } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) { 4201 unmap = scmd->cmnd[10] & 0x08; 4202 ndob = scmd->cmnd[10] & 0x01; 4203 num_blocks = get_unaligned_be32(scmd->cmnd + 28); 4204 } else 4205 return; 4206 4207 if ((unmap) && (ndob) && (num_blocks > wslen)) { 4208 scsiio_req->msg_flags |= 4209 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4210 *scsiio_flags |= 4211 MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE; 4212 } 4213 } 4214 4215 /** 4216 * mpi3mr_eh_host_reset - Host reset error handling callback 4217 * @scmd: SCSI command reference 4218 * 4219 * Issue controller reset 4220 * 4221 * Return: SUCCESS of successful reset else FAILED 4222 */ 4223 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) 4224 { 4225 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4226 int retval = FAILED, ret; 4227 4228 ret = mpi3mr_soft_reset_handler(mrioc, 4229 MPI3MR_RESET_FROM_EH_HOS, 1); 4230 if (ret) 4231 goto out; 4232 4233 retval = SUCCESS; 4234 out: 4235 sdev_printk(KERN_INFO, scmd->device, 4236 "Host reset is %s for scmd(%p)\n", 4237 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4238 4239 return retval; 4240 } 4241 4242 /** 4243 * mpi3mr_eh_bus_reset - Bus reset error handling callback 4244 * @scmd: SCSI command reference 4245 * 4246 * Checks whether pending I/Os are present for the RAID volume; 4247 * if not there's no need to reset the adapter. 4248 * 4249 * Return: SUCCESS of successful reset else FAILED 4250 */ 4251 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd) 4252 { 4253 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4254 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4255 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4256 u8 dev_type = MPI3_DEVICE_DEVFORM_VD; 4257 int retval = FAILED; 4258 unsigned int timeout = MPI3MR_RESET_TIMEOUT; 4259 4260 sdev_priv_data = scmd->device->hostdata; 4261 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { 4262 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4263 dev_type = stgt_priv_data->dev_type; 4264 } 4265 4266 if (dev_type == MPI3_DEVICE_DEVFORM_VD) { 4267 mpi3mr_wait_for_host_io(mrioc, 4268 MPI3MR_RAID_ERRREC_RESET_TIMEOUT); 4269 if (!mpi3mr_get_fw_pending_ios(mrioc)) { 4270 while (mrioc->reset_in_progress || 4271 mrioc->prepare_for_reset || 4272 mrioc->block_on_pci_err) { 4273 ssleep(1); 4274 if (!timeout--) { 4275 retval = FAILED; 4276 goto out; 4277 } 4278 } 4279 retval = SUCCESS; 4280 goto out; 4281 } 4282 } 4283 if (retval == FAILED) 4284 mpi3mr_print_pending_host_io(mrioc); 4285 4286 out: 4287 sdev_printk(KERN_INFO, scmd->device, 4288 "Bus reset is %s for scmd(%p)\n", 4289 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4290 return retval; 4291 } 4292 4293 /** 4294 * mpi3mr_eh_target_reset - Target reset error handling callback 4295 * @scmd: SCSI command reference 4296 * 4297 * Issue Target reset Task Management and verify the scmd is 4298 * terminated successfully and return status accordingly. 
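 *
 * Note: success is judged by the target's pend_count after the TM
 * completes; mpi3mr_issue_tm() re-counts outstanding commands for
 * the target via blk_mq_tagset_busy_iter(), and a non-zero count
 * fails the reset (see the function body below).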
4299 * 4300 * Return: SUCCESS of successful termination of the scmd else 4301 * FAILED 4302 */ 4303 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) 4304 { 4305 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4306 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4307 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4308 u16 dev_handle; 4309 u8 resp_code = 0; 4310 int retval = FAILED, ret = 0; 4311 4312 sdev_printk(KERN_INFO, scmd->device, 4313 "Attempting Target Reset! scmd(%p)\n", scmd); 4314 scsi_print_command(scmd); 4315 4316 sdev_priv_data = scmd->device->hostdata; 4317 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4318 sdev_printk(KERN_INFO, scmd->device, 4319 "SCSI device is not available\n"); 4320 retval = SUCCESS; 4321 goto out; 4322 } 4323 4324 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4325 dev_handle = stgt_priv_data->dev_handle; 4326 if (stgt_priv_data->dev_removed) { 4327 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4328 sdev_printk(KERN_INFO, scmd->device, 4329 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n", 4330 mrioc->name, dev_handle); 4331 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4332 retval = SUCCESS; 4333 else 4334 retval = FAILED; 4335 goto out; 4336 } 4337 sdev_printk(KERN_INFO, scmd->device, 4338 "Target Reset is issued to handle(0x%04x)\n", 4339 dev_handle); 4340 4341 ret = mpi3mr_issue_tm(mrioc, 4342 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, 4343 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4344 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4345 4346 if (ret) 4347 goto out; 4348 4349 if (stgt_priv_data->pend_count) { 4350 sdev_printk(KERN_INFO, scmd->device, 4351 "%s: target has %d pending commands, target reset is failed\n", 4352 mrioc->name, stgt_priv_data->pend_count); 4353 goto out; 4354 } 4355 4356 retval = SUCCESS; 4357 out: 4358 sdev_printk(KERN_INFO, scmd->device, 4359 "%s: target reset is %s for scmd(%p)\n", mrioc->name, 4360 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4361 4362 return retval; 4363 } 4364 4365 /** 4366 * mpi3mr_eh_dev_reset- Device reset error handling callback 4367 * @scmd: SCSI command reference 4368 * 4369 * Issue lun reset Task Management and verify the scmd is 4370 * terminated successfully and return status accordingly. 4371 * 4372 * Return: SUCCESS of successful termination of the scmd else 4373 * FAILED 4374 */ 4375 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 4376 { 4377 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4378 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4379 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4380 u16 dev_handle; 4381 u8 resp_code = 0; 4382 int retval = FAILED, ret = 0; 4383 4384 sdev_printk(KERN_INFO, scmd->device, 4385 "Attempting Device(lun) Reset! 
scmd(%p)\n", scmd); 4386 scsi_print_command(scmd); 4387 4388 sdev_priv_data = scmd->device->hostdata; 4389 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4390 sdev_printk(KERN_INFO, scmd->device, 4391 "SCSI device is not available\n"); 4392 retval = SUCCESS; 4393 goto out; 4394 } 4395 4396 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4397 dev_handle = stgt_priv_data->dev_handle; 4398 if (stgt_priv_data->dev_removed) { 4399 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4400 sdev_printk(KERN_INFO, scmd->device, 4401 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", 4402 mrioc->name, dev_handle); 4403 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4404 retval = SUCCESS; 4405 else 4406 retval = FAILED; 4407 goto out; 4408 } 4409 sdev_printk(KERN_INFO, scmd->device, 4410 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 4411 4412 ret = mpi3mr_issue_tm(mrioc, 4413 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 4414 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4415 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4416 4417 if (ret) 4418 goto out; 4419 4420 if (sdev_priv_data->pend_count) { 4421 sdev_printk(KERN_INFO, scmd->device, 4422 "%s: device has %d pending commands, device(LUN) reset is failed\n", 4423 mrioc->name, sdev_priv_data->pend_count); 4424 goto out; 4425 } 4426 retval = SUCCESS; 4427 out: 4428 sdev_printk(KERN_INFO, scmd->device, 4429 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, 4430 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4431 4432 return retval; 4433 } 4434 4435 /** 4436 * mpi3mr_eh_abort - Callback function for abort error handling 4437 * @scmd: SCSI command reference 4438 * 4439 * Issues Abort Task Management if the command is in LLD scope 4440 * and verifies if it is aborted successfully, and return status 4441 * accordingly. 
4442 *
4443 * Return: SUCCESS if the abort was successful, otherwise FAILED
4444 */
4445 static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
4446 {
4447 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4448 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4449 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4450 struct scmd_priv *cmd_priv;
4451 u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
4452 u8 resp_code = 0;
4453 int retval = FAILED, ret = 0;
4454 struct request *rq = scsi_cmd_to_rq(scmd);
4455 unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
4456 unsigned long scmd_age_sec = scmd_age_ms / 1000;
4457
4458 sdev_printk(KERN_INFO, scmd->device,
4459 "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
4460
4461 sdev_printk(KERN_INFO, scmd->device,
4462 "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
4463 mrioc->name, scmd, scmd_age_sec, scmd_age_ms % 1000, rq->timeout / HZ,
4464 scmd->retries, scmd->allowed);
4465
4466 scsi_print_command(scmd);
4467
4468 sdev_priv_data = scmd->device->hostdata;
4469 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4470 sdev_printk(KERN_INFO, scmd->device,
4471 "%s: Device not available, Skip issuing abort task\n",
4472 mrioc->name);
4473 retval = SUCCESS;
4474 goto out;
4475 }
4476
4477 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4478 dev_handle = stgt_priv_data->dev_handle;
4479
4480 cmd_priv = scsi_cmd_priv(scmd);
4481 if (!cmd_priv->in_lld_scope ||
4482 cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
4483 sdev_printk(KERN_INFO, scmd->device,
4484 "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
4485 mrioc->name, scmd);
4486 retval = SUCCESS;
4487 goto out;
4488 }
4489
4490 if (stgt_priv_data->dev_removed) {
4491 sdev_printk(KERN_INFO, scmd->device,
4492 "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
4493 mrioc->name, dev_handle);
4494 retval = FAILED;
4495 goto out;
4496 }
4497
4498 ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4499 dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4500 timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
4501
4502 if (ret)
4503 goto out;
4504
4505 if (cmd_priv->in_lld_scope) {
4506 sdev_printk(KERN_INFO, scmd->device,
4507 "%s: Abort task failed. scmd (0x%p) was not terminated\n",
4508 mrioc->name, scmd);
4509 goto out;
4510 }
4511
4512 retval = SUCCESS;
4513 out:
4514 sdev_printk(KERN_INFO, scmd->device,
4515 "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
4516 ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
4517
4518 return retval;
4519 }
4520
4521 /**
4522 * mpi3mr_scan_start - Scan start callback handler
4523 * @shost: SCSI host reference
4524 *
4525 * Issue port enable request asynchronously.
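 *
 * The port enable is posted asynchronously (the '1' passed to
 * mpi3mr_issue_port_enable() below); mpi3mr_scan_finished() then
 * polls the controller state to detect when the port enable has
 * completed, timed out or failed.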
4526 * 4527 * Return: Nothing 4528 */ 4529 static void mpi3mr_scan_start(struct Scsi_Host *shost) 4530 { 4531 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4532 4533 mrioc->scan_started = 1; 4534 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 4535 if (mpi3mr_issue_port_enable(mrioc, 1)) { 4536 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 4537 mrioc->scan_started = 0; 4538 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4539 } 4540 } 4541 4542 /** 4543 * mpi3mr_scan_finished - Scan finished callback handler 4544 * @shost: SCSI host reference 4545 * @time: Jiffies from the scan start 4546 * 4547 * Checks whether the port enable is completed or timedout or 4548 * failed and set the scan status accordingly after taking any 4549 * recovery if required. 4550 * 4551 * Return: 1 on scan finished or timed out, 0 for in progress 4552 */ 4553 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4554 unsigned long time) 4555 { 4556 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4557 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4558 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4559 4560 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4561 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4562 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4563 mpi3mr_print_fault_info(mrioc); 4564 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4565 mrioc->scan_started = 0; 4566 mrioc->init_cmds.is_waiting = 0; 4567 mrioc->init_cmds.callback = NULL; 4568 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4569 } 4570 4571 if (time >= (pe_timeout * HZ)) { 4572 ioc_err(mrioc, "port enable failed due to time out\n"); 4573 mpi3mr_check_rh_fault_ioc(mrioc, 4574 MPI3MR_RESET_FROM_PE_TIMEOUT); 4575 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4576 mrioc->scan_started = 0; 4577 mrioc->init_cmds.is_waiting = 0; 4578 mrioc->init_cmds.callback = NULL; 4579 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4580 } 4581 4582 if (mrioc->scan_started) 4583 return 0; 4584 4585 if (mrioc->scan_failed) { 4586 ioc_err(mrioc, 4587 "port enable failed with status=0x%04x\n", 4588 mrioc->scan_failed); 4589 } else 4590 ioc_info(mrioc, "port enable is successfully completed\n"); 4591 4592 mpi3mr_start_watchdog(mrioc); 4593 mrioc->is_driver_loading = 0; 4594 mrioc->stop_bsgs = 0; 4595 return 1; 4596 } 4597 4598 /** 4599 * mpi3mr_sdev_destroy - Slave destroy callback handler 4600 * @sdev: SCSI device reference 4601 * 4602 * Cleanup and free per device(lun) private data. 4603 * 4604 * Return: Nothing. 
4605 */ 4606 static void mpi3mr_sdev_destroy(struct scsi_device *sdev) 4607 { 4608 struct Scsi_Host *shost; 4609 struct mpi3mr_ioc *mrioc; 4610 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4611 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4612 unsigned long flags; 4613 struct scsi_target *starget; 4614 struct sas_rphy *rphy = NULL; 4615 4616 if (!sdev->hostdata) 4617 return; 4618 4619 starget = scsi_target(sdev); 4620 shost = dev_to_shost(&starget->dev); 4621 mrioc = shost_priv(shost); 4622 scsi_tgt_priv_data = starget->hostdata; 4623 4624 scsi_tgt_priv_data->num_luns--; 4625 4626 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4627 if (starget->channel == mrioc->scsi_device_channel) 4628 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4629 else if (mrioc->sas_transport_enabled && !starget->channel) { 4630 rphy = dev_to_rphy(starget->dev.parent); 4631 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4632 rphy->identify.sas_address, rphy); 4633 } 4634 4635 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4636 tgt_dev->starget = NULL; 4637 if (tgt_dev) 4638 mpi3mr_tgtdev_put(tgt_dev); 4639 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4640 4641 kfree(sdev->hostdata); 4642 sdev->hostdata = NULL; 4643 } 4644 4645 /** 4646 * mpi3mr_target_destroy - Target destroy callback handler 4647 * @starget: SCSI target reference 4648 * 4649 * Cleanup and free per target private data. 4650 * 4651 * Return: Nothing. 4652 */ 4653 static void mpi3mr_target_destroy(struct scsi_target *starget) 4654 { 4655 struct Scsi_Host *shost; 4656 struct mpi3mr_ioc *mrioc; 4657 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4658 struct mpi3mr_tgt_dev *tgt_dev; 4659 unsigned long flags; 4660 4661 if (!starget->hostdata) 4662 return; 4663 4664 shost = dev_to_shost(&starget->dev); 4665 mrioc = shost_priv(shost); 4666 scsi_tgt_priv_data = starget->hostdata; 4667 4668 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4669 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4670 if (tgt_dev && (tgt_dev->starget == starget) && 4671 (tgt_dev->perst_id == starget->id)) 4672 tgt_dev->starget = NULL; 4673 if (tgt_dev) { 4674 scsi_tgt_priv_data->tgt_dev = NULL; 4675 scsi_tgt_priv_data->perst_id = 0; 4676 mpi3mr_tgtdev_put(tgt_dev); 4677 mpi3mr_tgtdev_put(tgt_dev); 4678 } 4679 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4680 4681 kfree(starget->hostdata); 4682 starget->hostdata = NULL; 4683 } 4684 4685 /** 4686 * mpi3mr_sdev_configure - Slave configure callback handler 4687 * @sdev: SCSI device reference 4688 * @lim: queue limits 4689 * 4690 * Configure queue depth, max hardware sectors and virt boundary 4691 * as required 4692 * 4693 * Return: 0 always. 
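 *
 * The limits applied below come from the matched target device: the
 * queue depth from tgt_dev->q_depth, the error-handling and request
 * timeouts from MPI3MR_EH_SCMD_TIMEOUT and MPI3MR_SCMD_TIMEOUT, and
 * the remaining queue limits via mpi3mr_configure_tgt_dev().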
4694 */ 4695 static int mpi3mr_sdev_configure(struct scsi_device *sdev, 4696 struct queue_limits *lim) 4697 { 4698 struct scsi_target *starget; 4699 struct Scsi_Host *shost; 4700 struct mpi3mr_ioc *mrioc; 4701 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4702 unsigned long flags; 4703 int retval = 0; 4704 struct sas_rphy *rphy = NULL; 4705 4706 starget = scsi_target(sdev); 4707 shost = dev_to_shost(&starget->dev); 4708 mrioc = shost_priv(shost); 4709 4710 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4711 if (starget->channel == mrioc->scsi_device_channel) 4712 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4713 else if (mrioc->sas_transport_enabled && !starget->channel) { 4714 rphy = dev_to_rphy(starget->dev.parent); 4715 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4716 rphy->identify.sas_address, rphy); 4717 } 4718 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4719 if (!tgt_dev) 4720 return -ENXIO; 4721 4722 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4723 4724 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4725 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4726 4727 mpi3mr_configure_tgt_dev(tgt_dev, lim); 4728 mpi3mr_tgtdev_put(tgt_dev); 4729 return retval; 4730 } 4731 4732 /** 4733 * mpi3mr_sdev_init -Slave alloc callback handler 4734 * @sdev: SCSI device reference 4735 * 4736 * Allocate per device(lun) private data and initialize it. 4737 * 4738 * Return: 0 on success -ENOMEM on memory allocation failure. 4739 */ 4740 static int mpi3mr_sdev_init(struct scsi_device *sdev) 4741 { 4742 struct Scsi_Host *shost; 4743 struct mpi3mr_ioc *mrioc; 4744 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4745 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4746 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4747 unsigned long flags; 4748 struct scsi_target *starget; 4749 int retval = 0; 4750 struct sas_rphy *rphy = NULL; 4751 4752 starget = scsi_target(sdev); 4753 shost = dev_to_shost(&starget->dev); 4754 mrioc = shost_priv(shost); 4755 scsi_tgt_priv_data = starget->hostdata; 4756 4757 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4758 4759 if (starget->channel == mrioc->scsi_device_channel) 4760 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4761 else if (mrioc->sas_transport_enabled && !starget->channel) { 4762 rphy = dev_to_rphy(starget->dev.parent); 4763 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4764 rphy->identify.sas_address, rphy); 4765 } 4766 4767 if (tgt_dev) { 4768 if (tgt_dev->starget == NULL) 4769 tgt_dev->starget = starget; 4770 mpi3mr_tgtdev_put(tgt_dev); 4771 retval = 0; 4772 } else { 4773 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4774 return -ENXIO; 4775 } 4776 4777 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4778 4779 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4780 if (!scsi_dev_priv_data) 4781 return -ENOMEM; 4782 4783 scsi_dev_priv_data->lun_id = sdev->lun; 4784 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4785 sdev->hostdata = scsi_dev_priv_data; 4786 4787 scsi_tgt_priv_data->num_luns++; 4788 4789 return retval; 4790 } 4791 4792 /** 4793 * mpi3mr_target_alloc - Target alloc callback handler 4794 * @starget: SCSI target reference 4795 * 4796 * Allocate per target private data and initialize it. 4797 * 4798 * Return: 0 on success -ENOMEM on memory allocation failure. 
4799 */
4800 static int mpi3mr_target_alloc(struct scsi_target *starget)
4801 {
4802 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4803 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4804 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4805 struct mpi3mr_tgt_dev *tgt_dev;
4806 unsigned long flags;
4807 int retval = 0;
4808 struct sas_rphy *rphy = NULL;
4809
4810 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4811 if (!scsi_tgt_priv_data)
4812 return -ENOMEM;
4813
4814 starget->hostdata = scsi_tgt_priv_data;
4815
4816 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4817 if (starget->channel == mrioc->scsi_device_channel) {
4818 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4819 if (tgt_dev && !tgt_dev->is_hidden) {
4820 scsi_tgt_priv_data->starget = starget;
4821 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4822 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4823 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4824 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4825 tgt_dev->starget = starget;
4826 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4827 retval = 0;
4828 if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
4829 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4830 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4831 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
4832 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4833 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
4834 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
4835 scsi_tgt_priv_data->dev_nvme_dif = 1;
4836 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4837 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4838 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4839 scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
4840 } else
4841 retval = -ENXIO;
4842 } else if (mrioc->sas_transport_enabled && !starget->channel) {
4843 rphy = dev_to_rphy(starget->dev.parent);
4844 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4845 rphy->identify.sas_address, rphy);
4846 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4847 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
4848 scsi_tgt_priv_data->starget = starget;
4849 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4850 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4851 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4852 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4853 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4854 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4855 tgt_dev->starget = starget;
4856 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4857 retval = 0;
4858 } else
4859 retval = -ENXIO;
4860 }
4861 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4862
4863 return retval;
4864 }
4865
4866 /**
4867 * mpi3mr_check_return_unmap - Whether an unmap is allowed
4868 * @mrioc: Adapter instance reference
4869 * @scmd: SCSI Command reference
4870 *
4871 * The controller hardware cannot handle certain unmap commands
4872 * for NVMe drives; this routine checks for those and, when the
4873 * command is not allowed, completes the SCSI command with the
4874 * proper status and sense data and returns true.
4875 *
4876 * Return: TRUE for not allowed unmap, FALSE otherwise.
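 *
 * A worked example of the truncation arithmetic below (lengths are
 * hypothetical): on a non-zero PCI revision, param_len = 50 gives
 * (50 - 8) & 0xF = 10, so the parameter length is truncated to 40;
 * on revision 0, param_len = 48 with desc_len = 16 is truncated to
 * desc_len + 8 = 24.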
4877 */ 4878 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4879 struct scsi_cmnd *scmd) 4880 { 4881 unsigned char *buf; 4882 u16 param_len, desc_len, trunc_param_len; 4883 4884 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4885 4886 if (mrioc->pdev->revision) { 4887 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4888 trunc_param_len -= (param_len - 8) & 0xF; 4889 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4890 dprint_scsi_err(mrioc, 4891 "truncating param_len from (%d) to (%d)\n", 4892 param_len, trunc_param_len); 4893 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4894 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4895 } 4896 return false; 4897 } 4898 4899 if (!param_len) { 4900 ioc_warn(mrioc, 4901 "%s: cdb received with zero parameter length\n", 4902 __func__); 4903 scsi_print_command(scmd); 4904 scmd->result = DID_OK << 16; 4905 scsi_done(scmd); 4906 return true; 4907 } 4908 4909 if (param_len < 24) { 4910 ioc_warn(mrioc, 4911 "%s: cdb received with invalid param_len: %d\n", 4912 __func__, param_len); 4913 scsi_print_command(scmd); 4914 scmd->result = SAM_STAT_CHECK_CONDITION; 4915 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4916 0x1A, 0); 4917 scsi_done(scmd); 4918 return true; 4919 } 4920 if (param_len != scsi_bufflen(scmd)) { 4921 ioc_warn(mrioc, 4922 "%s: cdb received with param_len: %d bufflen: %d\n", 4923 __func__, param_len, scsi_bufflen(scmd)); 4924 scsi_print_command(scmd); 4925 scmd->result = SAM_STAT_CHECK_CONDITION; 4926 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4927 0x1A, 0); 4928 scsi_done(scmd); 4929 return true; 4930 } 4931 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4932 if (!buf) { 4933 scsi_print_command(scmd); 4934 scmd->result = SAM_STAT_CHECK_CONDITION; 4935 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4936 0x55, 0x03); 4937 scsi_done(scmd); 4938 return true; 4939 } 4940 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4941 desc_len = get_unaligned_be16(&buf[2]); 4942 4943 if (desc_len < 16) { 4944 ioc_warn(mrioc, 4945 "%s: Invalid descriptor length in param list: %d\n", 4946 __func__, desc_len); 4947 scsi_print_command(scmd); 4948 scmd->result = SAM_STAT_CHECK_CONDITION; 4949 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4950 0x26, 0); 4951 scsi_done(scmd); 4952 kfree(buf); 4953 return true; 4954 } 4955 4956 if (param_len > (desc_len + 8)) { 4957 trunc_param_len = desc_len + 8; 4958 scsi_print_command(scmd); 4959 dprint_scsi_err(mrioc, 4960 "truncating param_len(%d) to desc_len+8(%d)\n", 4961 param_len, trunc_param_len); 4962 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4963 scsi_print_command(scmd); 4964 } 4965 4966 kfree(buf); 4967 return false; 4968 } 4969 4970 /** 4971 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 4972 * @scmd: SCSI Command reference 4973 * 4974 * Checks whether a cdb is allowed during shutdown or not. 4975 * 4976 * Return: TRUE for allowed commands, FALSE otherwise. 4977 */ 4978 4979 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 4980 { 4981 switch (scmd->cmnd[0]) { 4982 case SYNCHRONIZE_CACHE: 4983 case START_STOP: 4984 return true; 4985 default: 4986 return false; 4987 } 4988 } 4989 4990 /** 4991 * mpi3mr_qcmd - I/O request despatcher 4992 * @shost: SCSI Host reference 4993 * @scmd: SCSI Command reference 4994 * 4995 * Issues the SCSI Command as an MPI3 request. 
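 *
 * Dispatch path in brief (see the function body): validate the
 * controller and device state, obtain a host tag, build the MPI3
 * SCSI IO request (CDB, flags, EEDP, SGLs), account for large-I/O
 * throttling, and post the request to the selected operational
 * request queue.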
4996 * 4997 * Return: 0 on successful queueing of the request or if the 4998 * request is completed with failure. 4999 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 5000 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 5001 */ 5002 static int mpi3mr_qcmd(struct Scsi_Host *shost, 5003 struct scsi_cmnd *scmd) 5004 { 5005 struct mpi3mr_ioc *mrioc = shost_priv(shost); 5006 struct mpi3mr_stgt_priv_data *stgt_priv_data; 5007 struct mpi3mr_sdev_priv_data *sdev_priv_data; 5008 struct scmd_priv *scmd_priv_data = NULL; 5009 struct mpi3_scsi_io_request *scsiio_req = NULL; 5010 struct op_req_qinfo *op_req_q = NULL; 5011 int retval = 0; 5012 u16 dev_handle; 5013 u16 host_tag; 5014 u32 scsiio_flags = 0, data_len_blks = 0; 5015 struct request *rq = scsi_cmd_to_rq(scmd); 5016 int iprio_class; 5017 u8 is_pcie_dev = 0; 5018 u32 tracked_io_sz = 0; 5019 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 5020 struct mpi3mr_throttle_group_info *tg = NULL; 5021 5022 if (mrioc->unrecoverable) { 5023 scmd->result = DID_ERROR << 16; 5024 scsi_done(scmd); 5025 goto out; 5026 } 5027 5028 sdev_priv_data = scmd->device->hostdata; 5029 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 5030 scmd->result = DID_NO_CONNECT << 16; 5031 scsi_done(scmd); 5032 goto out; 5033 } 5034 5035 if (mrioc->stop_drv_processing && 5036 !(mpi3mr_allow_scmd_to_fw(scmd))) { 5037 scmd->result = DID_NO_CONNECT << 16; 5038 scsi_done(scmd); 5039 goto out; 5040 } 5041 5042 stgt_priv_data = sdev_priv_data->tgt_priv_data; 5043 dev_handle = stgt_priv_data->dev_handle; 5044 5045 /* Avoid error handling escalation when device is removed or blocked */ 5046 5047 if (scmd->device->host->shost_state == SHOST_RECOVERY && 5048 scmd->cmnd[0] == TEST_UNIT_READY && 5049 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 5050 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 5051 scsi_done(scmd); 5052 goto out; 5053 } 5054 5055 if (mrioc->reset_in_progress || mrioc->prepare_for_reset 5056 || mrioc->block_on_pci_err) { 5057 retval = SCSI_MLQUEUE_HOST_BUSY; 5058 goto out; 5059 } 5060 5061 if (atomic_read(&stgt_priv_data->block_io)) { 5062 if (mrioc->stop_drv_processing) { 5063 scmd->result = DID_NO_CONNECT << 16; 5064 scsi_done(scmd); 5065 goto out; 5066 } 5067 retval = SCSI_MLQUEUE_DEVICE_BUSY; 5068 goto out; 5069 } 5070 5071 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 5072 scmd->result = DID_NO_CONNECT << 16; 5073 scsi_done(scmd); 5074 goto out; 5075 } 5076 if (stgt_priv_data->dev_removed) { 5077 scmd->result = DID_NO_CONNECT << 16; 5078 scsi_done(scmd); 5079 goto out; 5080 } 5081 5082 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 5083 is_pcie_dev = 1; 5084 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 5085 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 5086 mpi3mr_check_return_unmap(mrioc, scmd)) 5087 goto out; 5088 5089 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 5090 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 5091 scmd->result = DID_ERROR << 16; 5092 scsi_done(scmd); 5093 goto out; 5094 } 5095 5096 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5097 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 5098 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5099 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 5100 else 5101 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 5102 5103 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 5104 5105 if (sdev_priv_data->ncq_prio_enable) { 5106 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5107 if 
(iprio_class == IOPRIO_CLASS_RT) 5108 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 5109 } 5110 5111 if (scmd->cmd_len > 16) 5112 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 5113 5114 scmd_priv_data = scsi_cmd_priv(scmd); 5115 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 5116 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 5117 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 5118 scsiio_req->host_tag = cpu_to_le16(host_tag); 5119 5120 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 5121 5122 if (stgt_priv_data->wslen) 5123 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 5124 stgt_priv_data->wslen); 5125 5126 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 5127 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 5128 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 5129 scsiio_req->flags = cpu_to_le32(scsiio_flags); 5130 int_to_scsilun(sdev_priv_data->lun_id, 5131 (struct scsi_lun *)scsiio_req->lun); 5132 5133 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 5134 mpi3mr_clear_scmd_priv(mrioc, scmd); 5135 retval = SCSI_MLQUEUE_HOST_BUSY; 5136 goto out; 5137 } 5138 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 5139 data_len_blks = scsi_bufflen(scmd) >> 9; 5140 if ((data_len_blks >= mrioc->io_throttle_data_length) && 5141 stgt_priv_data->io_throttle_enabled) { 5142 tracked_io_sz = data_len_blks; 5143 tg = stgt_priv_data->throttle_group; 5144 if (tg) { 5145 ioc_pend_data_len = atomic_add_return(data_len_blks, 5146 &mrioc->pend_large_data_sz); 5147 tg_pend_data_len = atomic_add_return(data_len_blks, 5148 &tg->pend_large_data_sz); 5149 if (!tg->io_divert && ((ioc_pend_data_len >= 5150 mrioc->io_throttle_high) || 5151 (tg_pend_data_len >= tg->high))) { 5152 tg->io_divert = 1; 5153 tg->need_qd_reduction = 1; 5154 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 5155 tg, 1); 5156 mpi3mr_queue_qd_reduction_event(mrioc, tg); 5157 } 5158 } else { 5159 ioc_pend_data_len = atomic_add_return(data_len_blks, 5160 &mrioc->pend_large_data_sz); 5161 if (ioc_pend_data_len >= mrioc->io_throttle_high) 5162 stgt_priv_data->io_divert = 1; 5163 } 5164 } 5165 5166 if (stgt_priv_data->io_divert) { 5167 scsiio_req->msg_flags |= 5168 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 5169 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 5170 } 5171 scsiio_req->flags |= cpu_to_le32(scsiio_flags); 5172 5173 if (mpi3mr_op_request_post(mrioc, op_req_q, 5174 scmd_priv_data->mpi3mr_scsiio_req)) { 5175 mpi3mr_clear_scmd_priv(mrioc, scmd); 5176 retval = SCSI_MLQUEUE_HOST_BUSY; 5177 if (tracked_io_sz) { 5178 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 5179 if (tg) 5180 atomic_sub(tracked_io_sz, 5181 &tg->pend_large_data_sz); 5182 } 5183 goto out; 5184 } 5185 5186 out: 5187 return retval; 5188 } 5189 5190 static const struct scsi_host_template mpi3mr_driver_template = { 5191 .module = THIS_MODULE, 5192 .name = "MPI3 Storage Controller", 5193 .proc_name = MPI3MR_DRIVER_NAME, 5194 .queuecommand = mpi3mr_qcmd, 5195 .target_alloc = mpi3mr_target_alloc, 5196 .sdev_init = mpi3mr_sdev_init, 5197 .sdev_configure = mpi3mr_sdev_configure, 5198 .target_destroy = mpi3mr_target_destroy, 5199 .sdev_destroy = mpi3mr_sdev_destroy, 5200 .scan_finished = mpi3mr_scan_finished, 5201 .scan_start = mpi3mr_scan_start, 5202 .change_queue_depth = mpi3mr_change_queue_depth, 5203 .eh_abort_handler = mpi3mr_eh_abort, 5204 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 5205 .eh_target_reset_handler = mpi3mr_eh_target_reset, 5206 
.eh_bus_reset_handler = mpi3mr_eh_bus_reset, 5207 .eh_host_reset_handler = mpi3mr_eh_host_reset, 5208 .bios_param = mpi3mr_bios_param, 5209 .map_queues = mpi3mr_map_queues, 5210 .mq_poll = mpi3mr_blk_mq_poll, 5211 .no_write_same = 1, 5212 .can_queue = 1, 5213 .this_id = -1, 5214 .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, 5215 /* max xfer supported is 1M (2K in 512 byte sized sectors) 5216 */ 5217 .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), 5218 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 5219 .max_segment_size = 0xffffffff, 5220 .track_queue_depth = 1, 5221 .cmd_size = sizeof(struct scmd_priv), 5222 .shost_groups = mpi3mr_host_groups, 5223 .sdev_groups = mpi3mr_dev_groups, 5224 }; 5225 5226 /** 5227 * mpi3mr_init_drv_cmd - Initialize internal command tracker 5228 * @cmdptr: Internal command tracker 5229 * @host_tag: Host tag used for the specific command 5230 * 5231 * Initialize the internal command tracker structure with 5232 * specified host tag. 5233 * 5234 * Return: Nothing. 5235 */ 5236 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 5237 u16 host_tag) 5238 { 5239 mutex_init(&cmdptr->mutex); 5240 cmdptr->reply = NULL; 5241 cmdptr->state = MPI3MR_CMD_NOTUSED; 5242 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 5243 cmdptr->host_tag = host_tag; 5244 } 5245 5246 /** 5247 * osintfc_mrioc_security_status -Check controller secure status 5248 * @pdev: PCI device instance 5249 * 5250 * Read the Device Serial Number capability from PCI config 5251 * space and decide whether the controller is secure or not. 5252 * 5253 * Return: 0 on success, non-zero on failure. 5254 */ 5255 static int 5256 osintfc_mrioc_security_status(struct pci_dev *pdev) 5257 { 5258 u32 cap_data; 5259 int base; 5260 u32 ctlr_status; 5261 u32 debug_status; 5262 int retval = 0; 5263 5264 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 5265 if (!base) { 5266 dev_err(&pdev->dev, 5267 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 5268 return -1; 5269 } 5270 5271 pci_read_config_dword(pdev, base + 4, &cap_data); 5272 5273 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 5274 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 5275 5276 switch (ctlr_status) { 5277 case MPI3MR_INVALID_DEVICE: 5278 dev_err(&pdev->dev, 5279 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5280 __func__, pdev->device, pdev->subsystem_vendor, 5281 pdev->subsystem_device); 5282 retval = -1; 5283 break; 5284 case MPI3MR_CONFIG_SECURE_DEVICE: 5285 if (!debug_status) 5286 dev_info(&pdev->dev, 5287 "%s: Config secure ctlr is detected\n", 5288 __func__); 5289 break; 5290 case MPI3MR_HARD_SECURE_DEVICE: 5291 break; 5292 case MPI3MR_TAMPERED_DEVICE: 5293 dev_err(&pdev->dev, 5294 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5295 __func__, pdev->device, pdev->subsystem_vendor, 5296 pdev->subsystem_device); 5297 retval = -1; 5298 break; 5299 default: 5300 retval = -1; 5301 break; 5302 } 5303 5304 if (!retval && debug_status) { 5305 dev_err(&pdev->dev, 5306 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5307 __func__, pdev->device, pdev->subsystem_vendor, 5308 pdev->subsystem_device); 5309 retval = -1; 5310 } 5311 5312 return retval; 5313 } 5314 5315 /** 5316 * mpi3mr_probe - PCI probe callback 5317 * @pdev: PCI device instance 5318 * @id: PCI device ID details 5319 * 5320 * controller initialization routine. 
Checks the security status
5321 * of the controller and, if it is invalid or tampered, fails the
5322 * probe without initializing the controller. Otherwise,
5323 * allocates the per-adapter instance through shost_priv,
5324 * initializes controller-specific data structures, initializes
5325 * the controller hardware, and adds the shost to the SCSI subsystem.
5326 *
5327 * Return: 0 on success, non-zero on failure.
5328 */
5329
5330 static int
5331 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5332 {
5333 struct mpi3mr_ioc *mrioc = NULL;
5334 struct Scsi_Host *shost = NULL;
5335 int retval = 0, i;
5336
5337 if (osintfc_mrioc_security_status(pdev)) {
5338 warn_non_secure_ctlr = 1;
5339 return 1; /* For Invalid and Tampered device */
5340 }
5341
5342 shost = scsi_host_alloc(&mpi3mr_driver_template,
5343 sizeof(struct mpi3mr_ioc));
5344 if (!shost) {
5345 retval = -ENODEV;
5346 goto shost_failed;
5347 }
5348
5349 mrioc = shost_priv(shost);
5350 retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
5351 if (retval < 0)
5352 goto id_alloc_failed;
5353 mrioc->id = (u8)retval;
5354 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
5355 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
5356 INIT_LIST_HEAD(&mrioc->list);
5357 spin_lock(&mrioc_list_lock);
5358 list_add_tail(&mrioc->list, &mrioc_list);
5359 spin_unlock(&mrioc_list_lock);
5360
5361 spin_lock_init(&mrioc->admin_req_lock);
5362 spin_lock_init(&mrioc->reply_free_queue_lock);
5363 spin_lock_init(&mrioc->sbq_lock);
5364 spin_lock_init(&mrioc->fwevt_lock);
5365 spin_lock_init(&mrioc->tgtdev_lock);
5366 spin_lock_init(&mrioc->watchdog_lock);
5367 spin_lock_init(&mrioc->chain_buf_lock);
5368 spin_lock_init(&mrioc->sas_node_lock);
5369 spin_lock_init(&mrioc->trigger_lock);
5370
5371 INIT_LIST_HEAD(&mrioc->fwevt_list);
5372 INIT_LIST_HEAD(&mrioc->tgtdev_list);
5373 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
5374 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
5375 INIT_LIST_HEAD(&mrioc->sas_expander_list);
5376 INIT_LIST_HEAD(&mrioc->hba_port_table_list);
5377 INIT_LIST_HEAD(&mrioc->enclosure_list);
5378
5379 mutex_init(&mrioc->reset_mutex);
5380 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
5381 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
5382 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
5383 mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
5384 mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
5385 MPI3MR_HOSTTAG_TRANSPORT_CMDS);
5386
5387 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5388 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
5389 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
5390
5391 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5392 mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5393 MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5394
5395 if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
5396 !pdev->revision)
5397 mrioc->enable_segqueue = false;
5398 else
5399 mrioc->enable_segqueue = true;
5400
5401 init_waitqueue_head(&mrioc->reset_waitq);
5402 mrioc->logging_level = logging_level;
5403 mrioc->shost = shost;
5404 mrioc->pdev = pdev;
5405 mrioc->stop_bsgs = 1;
5406
5407 mrioc->max_sgl_entries = max_sgl_entries;
5408 if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5409 mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5410 else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5411 mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5412 else {
5413 mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
5414 mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;

	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	/* expose a third (poll) queue map when polled reply queues exist */
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = mrioc->max_sgl_entries;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
id_alloc_failed:
	scsi_host_put(shost);
shost_failed:
	return retval;
}
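/*
 * Note on the unwind order above: each failure label releases only what
 * was successfully acquired before the jump, in reverse order of
 * acquisition (host/IOC teardown, memory and PCI resources, the
 * firmware-event workqueue, the IDA id and list linkage, and finally
 * the shost reference).
 */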
/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_hba_port *port, *hba_port_next;
	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mrioc->unrecoverable = 1;
	}

	if (!pci_device_is_present(mrioc->pdev) ||
	    mrioc->pci_err_recovery) {
		mrioc->unrecoverable = 1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}

	mpi3mr_bsg_exit(mrioc);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	if (mrioc->sas_transport_enabled)
		sas_remove_host(shost);
	else
		scsi_remove_host(shost);

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
	    &mrioc->sas_expander_list, list) {
		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
		mpi3mr_expander_node_remove(mrioc, sas_expander);
		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	}
	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
		ioc_info(mrioc,
		    "removing hba_port entry: %p port: %d from hba_port list\n",
		    port, port->port_id);
		list_del(&port->list);
		kfree(port);
	}
	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

	if (mrioc->sas_hba.num_phys) {
		kfree(mrioc->sas_hba.phy);
		mrioc->sas_hba.phy = NULL;
		mrioc->sas_hba.num_phys = 0;
	}

	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}
/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller.
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}

/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @dev: Device struct
 *
 * Change the power state to the given value and cleanup the IOC
 * by issuing MUR and shutdown notification.
 *
 * Return: 0 always.
 */
static int __maybe_unused
mpi3mr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
	    pdev, pci_name(pdev));
	mpi3mr_cleanup_resources(mrioc);

	return 0;
}

/**
 * mpi3mr_resume - PCI power management resume callback
 * @dev: Device struct
 *
 * Restore the power state to D0, reinitialize the controller
 * and resume I/O operations to the target devices.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __maybe_unused
mpi3mr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

	mrioc->stop_drv_processing = 0;
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
	scsi_unblock_requests(shost);
	mrioc->device_refresh_on = 0;
	mpi3mr_start_watchdog(mrioc);

	return 0;
}
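/*
 * Suspend/resume pairing (summary of the two callbacks above): suspend
 * quiesces driver processing, blocks the request queues and tears down
 * the IOC and PCI resources; resume re-acquires resources, reinitializes
 * the controller via mpi3mr_reinit_ioc(), waits for the topology to
 * settle and then unblocks I/O and restarts the watchdog.
 */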
/**
 * mpi3mr_pcierr_error_detected - PCI error detected callback
 * @pdev: PCI device instance
 * @state: channel state
 *
 * This function is called by the PCI error recovery driver and,
 * based on the state passed, decides what action to recommend
 * back to the PCI driver.
 *
 * For all of the states, if there is no valid mrioc or SCSI host
 * reference in the PCI device, this function returns
 * disconnect.
 *
 * For the normal state, this function returns can recover.
 *
 * For the frozen state, this function blocks until any pending
 * controller initialization or re-initialization completes,
 * stops any new interaction with the controller and returns
 * reset required.
 *
 * For the permanent failure state, this function marks the
 * controller as unrecoverable and returns disconnect.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or
 * DISCONNECT based on the controller state.
 */
static pci_ers_result_t
mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__,
	    state);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;
		do {
			if (mrioc->reset_in_progress || mrioc->is_driver_loading)
				ssleep(1);
			else
				break;
		} while (--timeout);

		if (!timeout) {
			mrioc->pci_err_recovery = true;
			mrioc->block_on_pci_err = true;
			mrioc->unrecoverable = 1;
			mpi3mr_stop_watchdog(mrioc);
			mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
			return PCI_ERS_RESULT_DISCONNECT;
		}

		scsi_block_requests(mrioc->shost);
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_cleanup_resources(mrioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;
		mrioc->unrecoverable = 1;
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}
}
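/*
 * Channel-state handling above, at a glance:
 *
 *   pci_channel_io_normal        -> PCI_ERS_RESULT_CAN_RECOVER
 *   pci_channel_io_frozen        -> PCI_ERS_RESULT_NEED_RESET
 *                                   (or DISCONNECT if a reset or driver
 *                                   load is still pending at timeout)
 *   pci_channel_io_perm_failure  -> PCI_ERS_RESULT_DISCONNECT
 *   anything else                -> PCI_ERS_RESULT_DISCONNECT
 */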
/**
 * mpi3mr_pcierr_slot_reset - Post slot reset callback
 * @pdev: PCI device instance
 *
 * This function is called by the PCI error recovery driver
 * after a slot or link reset issued by it for the recovery; the
 * driver is expected to bring back the controller and
 * initialize it.
 *
 * This function restores PCI state and reinitializes controller
 * resources and the controller, blocking until any pending
 * reset completes.
 *
 * Return: PCI_ERS_RESULT_DISCONNECT on failure or
 * PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	do {
		if (mrioc->reset_in_progress)
			ssleep(1);
		else
			break;
	} while (--timeout);

	if (!timeout)
		goto out_failed;

	pci_restore_state(pdev);

	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		goto out_failed;
	}
	mrioc->unrecoverable = 0;
	mrioc->pci_err_recovery = false;

	if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0))
		goto out_failed;

	return PCI_ERS_RESULT_RECOVERED;

out_failed:
	mrioc->unrecoverable = 1;
	mrioc->block_on_pci_err = false;
	scsi_unblock_requests(shost);
	mpi3mr_start_watchdog(mrioc);
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * mpi3mr_pcierr_resume - PCI error recovery resume callback
 * @pdev: PCI device instance
 *
 * This function enables all I/O and IOCTLs post reset issued as
 * part of the PCI error recovery.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcierr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mpi3mr_start_watchdog(mrioc);
	}
}
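/*
 * Recovery sequencing (per the kernel's PCI error recovery model): the
 * core calls .error_detected first; .mmio_enabled runs only when
 * CAN_RECOVER was returned, .slot_reset runs after a link/slot reset
 * when NEED_RESET was returned, and .resume runs once recovery has
 * succeeded end to end.
 */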
/**
 * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback
 * @pdev: PCI device instance
 *
 * This is called only if mpi3mr_pcierr_error_detected returns
 * PCI_ERS_RESULT_CAN_RECOVER.
 *
 * Return: PCI_ERS_RESULT_DISCONNECT when the controller is
 * unrecoverable or when the shost/mrioc reference cannot be
 * found, else return PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->unrecoverable)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);

static const struct pci_error_handlers mpi3mr_err_handler = {
	.error_detected = mpi3mr_pcierr_error_detected,
	.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
	.slot_reset = mpi3mr_pcierr_slot_reset,
	.resume = mpi3mr_pcierr_resume,
};

static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);

static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
	.err_handler = &mpi3mr_err_handler,
	.driver.pm = &mpi3mr_pm_ops,
};

static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);

static int __init mpi3mr_init(void)
{
	int ret_val;

	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
	    MPI3MR_DRIVER_VERSION);

	mpi3mr_transport_template =
	    sas_attach_transport(&mpi3mr_transport_functions);
	if (!mpi3mr_transport_template) {
		pr_err("%s failed to load due to sas transport attach failure\n",
		    MPI3MR_DRIVER_NAME);
		return -ENODEV;
	}

	ret_val = pci_register_driver(&mpi3mr_pci_driver);
	if (ret_val) {
		pr_err("%s failed to load due to pci register driver failure\n",
		    MPI3MR_DRIVER_NAME);
		goto err_pci_reg_fail;
	}

	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	if (ret_val)
		goto err_event_counter;

	return ret_val;

err_event_counter:
	pci_unregister_driver(&mpi3mr_pci_driver);

err_pci_reg_fail:
	sas_release_transport(mpi3mr_transport_template);
	return ret_val;
}

static void __exit mpi3mr_exit(void)
{
	if (warn_non_secure_ctlr)
		pr_warn(
		    "Unloading %s version %s while managing a non secure controller\n",
		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
	else
		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
		    MPI3MR_DRIVER_VERSION);

	driver_remove_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	pci_unregister_driver(&mpi3mr_pci_driver);
	sas_release_transport(mpi3mr_transport_template);
	ida_destroy(&mrioc_ida);
}
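/*
 * Usage note (illustrative, not part of the driver): with the module
 * loaded, the cumulative event count exposed by the event_counter
 * driver attribute created in mpi3mr_init() can be read from sysfs,
 * e.g.
 *
 *   cat /sys/bus/pci/drivers/mpi3mr/event_counter
 *
 * and module parameters such as logging_level or max_sgl_entries can
 * be supplied at load time, e.g. "modprobe mpi3mr max_sgl_entries=1024".
 */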
module_init(mpi3mr_init);
module_exit(mpi3mr_exit);