// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define LINK_RATE_BIT_MASK 2
#define FIS_BUF_SIZE 20
#define WAIT_CMD_COMPLETE_DELAY 100
#define WAIT_CMD_COMPLETE_TMROUT 5000
#define DELAY_FOR_LINK_READY 2000
#define BLK_CNT_OPTIMIZE_MARK 64
#define HZ_TO_MHZ 1000000
#define DELAY_FOR_SOFTRESET_MAX 1000
#define DELAY_FOR_SOFTRESET_MIN 900

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

struct hisi_sas_internal_abort_data {
	bool rst_ha_timeout; /* reset the HA for timeout */
};

static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc)
{
	if (!qc)
		return HISI_SAS_SATA_PROTOCOL_PIO;

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;
	case ATA_PROT_PIO:
		return HISI_SAS_SATA_PROTOCOL_PIO;
	case ATA_PROT_DMA:
		return HISI_SAS_SATA_PROTOCOL_DMA;
	case ATA_PROT_NCQ_NODATA:
	case ATA_PROT_NCQ:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;
	default:
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}

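/**
 * hisi_sas_get_ata_protocol - select the HW SATA protocol type for a task
 * @task: the sas_task carrying the ATA command
 *
 * Map the command in the H2D FIS to one of the FPDMA/PIO/DMA/NONDATA
 * protocol types the hardware understands; commands not listed explicitly
 * fall back to the taskfile protocol of the queued command.
 */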
u8 hisi_sas_get_ata_protocol(struct sas_task *task)
{
	struct host_to_dev_fis *fis = &task->ata_task.fis;
	struct ata_queued_cmd *qc = task->uldd_task;
	int direction = task->data_dir;

	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return hisi_sas_get_ata_protocol_from_tf(qc);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
		hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * LINK_RATE_BIT_MASK);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx < HISI_SAS_RESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct request *rq)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (rq)
		return rq->tag + HISI_SAS_RESERVED_IPTT;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT,
				   hisi_hba->last_slot_index + 1);
	if (index >= HISI_SAS_RESERVED_IPTT) {
		index = find_next_zero_bit(bitmap,
					   HISI_SAS_RESERVED_IPTT,
					   0);
		if (index >= HISI_SAS_RESERVED_IPTT) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

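/**
 * hisi_sas_slot_task_free - free a slot and undo the task's DMA mappings
 * @hisi_hba: host to which the slot belongs
 * @task: the task using the slot, may be NULL
 * @slot: the slot to free
 * @need_lock: whether sas_dev->lock must be taken to unlink the slot
 */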
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot, bool need_lock)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem) {
				if (task->task_proto & SAS_PROTOCOL_SSP)
					dma_unmap_sg(dev, task->scatter,
						     task->num_scatter,
						     task->data_dir);
				else
					dma_unmap_sg(dev, &task->smp_task.smp_req,
						     1, DMA_TO_DEVICE);
			}
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	if (need_lock) {
		spin_lock(&sas_dev->lock);
		list_del_init(&slot->entry);
		spin_unlock(&sas_dev->lock);
	} else {
		list_del_init(&slot->entry);
	}

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto) && n_elem) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
				     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			dma_unmap_sg(dev, &task->smp_task.smp_req,
				     1, DMA_TO_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

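/*
 * Reserve a delivery-queue entry, fill in the command header and tables
 * for the task's protocol, then publish the slot: the smp_wmb() makes
 * all slot memory writes visible before the slot is marked ready for
 * the hardware-specific delivery/completion code.
 */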
static
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot,
			   struct hisi_sas_dq *dq,
			   struct hisi_sas_device *sas_dev)
{
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue;
	struct sas_task *task = slot->task;
	int wr_q_index;

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		hisi_sas_task_prep_abort(hisi_hba, slot);
		break;
	default:
		return;
	}

	/* Make slot memories observable before marking as ready */
	smp_wmb();
	WRITE_ONCE(slot->ready, 1);

	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	int n_elem = 0, n_elem_dif = 0;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct hisi_sas_dq *dq = NULL;
	struct hisi_sas_port *port;
	struct hisi_hba *hisi_hba;
	struct hisi_sas_slot *slot;
	struct request *rq = NULL;
	struct device *dev;
	int rc;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; should not call
		 * task_done for SATA
		 */
		if (device->dev_type != SAS_SATA_DEV && !internal_abort)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	case SAS_PROTOCOL_SMP:
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
			if (!gfpflags_allow_blocking(gfp_flags))
				return -EINVAL;

			down(&hisi_hba->sem);
			up(&hisi_hba->sem);
		}

		if (DEV_IS_GONE(sas_dev)) {
			if (sas_dev)
				dev_info(dev, "task prep: device %d not ready\n",
					 sas_dev->device_id);
			else
				dev_info(dev, "task prep: device %016llx not ready\n",
					 SAS_ADDR(device->sas_addr));

			return -ECOMM;
		}

		port = to_hisi_sas_port(sas_port);
		if (!port->port_attached) {
			dev_info(dev, "task prep: %s port%d has no attached device\n",
				 dev_is_sata(device) ? "SATA/STP" : "SAS",
				 device->port->id);

			return -ECOMM;
		}

		rq = sas_task_find_rq(task);
		if (rq) {
			unsigned int dq_index;
			u32 blk_tag;

			blk_tag = blk_mq_unique_tag(rq);
			dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
			dq = &hisi_hba->dq[dq_index];
		} else {
			int queue;

			if (hisi_hba->iopoll_q_cnt) {
				/*
				 * Use interrupt queue (queue 0) to deliver and complete
				 * internal IOs of libsas or libata when there is at least
				 * one iopoll queue
				 */
				queue = 0;
			} else {
				struct Scsi_Host *shost = hisi_hba->shost;
				struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

				queue = qmap->mq_map[raw_smp_processor_id()];
			}
			dq = &hisi_hba->dq[queue];
		}
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		if (!hisi_hba->hw->prep_abort)
			return TMF_RESP_FUNC_FAILED;

		if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
			return -EIO;

		hisi_hba = dev_to_hisi_hba(device);

		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
			return -EINVAL;

		port = to_hisi_sas_port(sas_port);
		dq = &hisi_hba->dq[task->abort_task.qid];
		break;
	default:
		dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		return -EINVAL;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (!internal_abort && hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, rq);

	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot = &hisi_hba->slot_info[rc];
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->task = task;
	slot->port = port;

	slot->tmf = task->tmf;
	slot->is_internal = !!task->tmf || internal_abort;

	/* protect task_prep and start_delivery sequence */
	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem);
prep_out:
	dev_err(dev, "task exec: failed[%d]!\n", rc);
	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
				 gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

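/*
 * Allocate a device slot from hisi_hba->devices[], scanning circularly
 * from just after the last allocated index; the device is bound to a
 * delivery queue chosen by index so that devices spread across queues.
 */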
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq)
{
	/* make sure CQ entries being processed are processed to completion */
	spin_lock(&cq->poll_lock);
	spin_unlock(&cq->poll_lock);
}

static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq)
{
	struct hisi_hba *hisi_hba = cq->hisi_hba;

	if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt)
		return false;
	return true;
}

static void hisi_sas_sync_cq(struct hisi_sas_cq *cq)
{
	if (hisi_sas_queue_is_poll(cq))
		hisi_sas_sync_poll_cq(cq);
	else
		synchronize_irq(cq->irq_no);
}

void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		if (hisi_sas_queue_is_poll(cq))
			hisi_sas_sync_poll_cq(cq);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs);

void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		hisi_sas_sync_cq(cq);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs);

static void hisi_sas_tmf_aborted(struct sas_task *task)
{
	struct hisi_sas_slot *slot = task->lldd_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;

	if (slot) {
		struct hisi_sas_cq *cq =
			&hisi_hba->cq[slot->dlvry_queue];
		/*
		 * sync irq or poll queue to avoid freeing task
		 * before using task in IO completion
		 */
		hisi_sas_sync_cq(cq);
		slot->task = NULL;
	}
}

#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		while (retry-- > 0) {
			rc = sas_abort_task_set(device, lun.scsi_lun);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * If an expander is swapped when a SATA disk is attached then
		 * we should issue a hard reset to clear previous affiliation
		 * of STP target port, see SPL (chapter 6.19.4).
		 *
		 * However we don't need to issue a hard reset here for these
		 * reasons:
		 * a. When probing the device, libsas/libata already issues a
		 * hard reset in sas_probe_sata() -> ata_port_probe().
		 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
		 * to issue a hard reset by checking the dev status (== INIT).
		 * b. When resetting the controller, this is simply unnecessary.
		 */
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

int hisi_sas_sdev_init(struct scsi_device *sdev)
{
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	struct hisi_sas_device *sas_dev = ddev->lldd_dev;
	int rc;

	rc = sas_sdev_init(sdev);
	if (rc)
		return rc;

	rc = hisi_sas_init_device(ddev);
	if (rc)
		return rc;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_sdev_init);

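/**
 * hisi_sas_dev_found - libsas hook called when a new domain device is found
 * @device: the newly-found domain device
 *
 * Allocate an LLDD device slot, program the ITCT for it, and for
 * expander-attached devices verify that the parent expander really has a
 * phy attached to this device.
 */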
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;

		phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device);
		if (phy_no < 0) {
			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = phy_no;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_sdev_configure(struct scsi_device *sdev,
			    struct queue_limits *lim)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_sdev_configure(sdev, lim);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, HISI_SAS_BLK_QUEUE_DEPTH);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_sdev_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work_common(struct work_struct *work,
				       enum hisi_sas_phy_event event)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[event]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = phy->port;
	struct device *dev = hisi_hba->dev;
	struct domain_device *port_dev;
	int phy_no = sas_phy->id;

	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
	    sas_port && port && (port->id != phy->port_id)) {
		dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
			 phy_no, port->id, phy->port_id);
		port_dev = sas_port->port_dev;
		if (port_dev && !dev_is_expander(port_dev->dev_type)) {
			/*
			 * Set the device state to gone to block
			 * sending IO to the device.
			 */
			set_bit(SAS_DEV_GONE, &port_dev->state);
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
			return;
		}
	}

	phy->wait_phyup_cnt = 0;
	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
	pm_runtime_put_sync(dev);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
	[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};

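/**
 * hisi_sas_notify_phy_event - queue one of the per-phy event works
 * @phy: phy on which the event occurred
 * @event: index into phy->works, must be below HISI_PHYES_NUM
 *
 * Return: %true if the work was queued, %false if the event index is
 * invalid or the work was already pending.
 */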
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = timer_container_of(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

#define HISI_SAS_WAIT_PHYUP_RETRIES 10

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->phy_attached) {
		spin_unlock_irqrestore(&phy->lock, flags);
		return;
	}

	if (!timer_pending(&phy->timer)) {
		if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
			phy->wait_phyup_cnt++;
			phy->timer.expires = jiffies +
					     HISI_SAS_WAIT_PHYUP_TIMEOUT;
			add_timer(&phy->timer);
			spin_unlock_irqrestore(&phy->lock, flags);
			return;
		}

		dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
			 phy_no, phy->wait_phyup_cnt);
		phy->wait_phyup_cnt = 0;
	}
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot, bool need_lock)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	spin_lock(&sas_dev->lock);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot, false);

	spin_unlock(&sas_dev->lock);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static int
hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
				 bool rst_ha_timeout)
{
	struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
	struct domain_device *device = sas_dev->sas_device;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	int i, rc;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		const struct cpumask *mask = cq->irq_mask;

		if (mask && !cpumask_intersects(cpu_online_mask, mask))
			continue;
		rc = sas_execute_internal_abort_dev(device, i, &data);
		if (rc)
			return rc;
	}

	return 0;
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort_dev(sas_dev, true);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else {
		return -EINVAL;
	}

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

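/**
 * hisi_sas_control_phy - libsas phy control hook
 * @sas_phy: the phy to operate on
 * @func: requested operation
 * @funcdata: operation-specific argument (e.g. the linkrates for
 *            PHY_FUNC_SET_LINK_RATE)
 *
 * For operations that restart the link, wait for the phy to come back
 * up before returning if it was attached when the operation started.
 */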
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct hisi_sas_phy *phy = container_of(sas_phy,
						struct hisi_sas_phy, sas_phy);
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = sas_phy->id;
	u8 sts = phy->phy_attached;
	int ret = 0;

	down(&hisi_hba->sem);
	phy->reset_completion = &completion;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		goto out;

	case PHY_FUNC_SET_LINK_RATE:
		ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			goto out;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (sts && !wait_for_completion_timeout(&completion,
						HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
			 phy_no, func);
		if (phy->in_reset)
			ret = -ETIMEDOUT;
	}

out:
	phy->reset_completion = NULL;

	up(&hisi_hba->sem);
	return ret;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[FIS_BUF_SIZE] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = sas_execute_ata_cmd(device, fis, -1);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		usleep_range(DELAY_FOR_SOFTRESET_MIN, DELAY_FOR_SOFTRESET_MAX);
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, -1);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk %016llx de-reset failed\n",
					SAS_ADDR(device->sas_addr));
		}
	} else {
		dev_err(dev, "ata disk %016llx reset failed\n",
			SAS_ADDR(device->sas_addr));
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		spin_lock(&sas_port->phy_list_lock);
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}
		spin_unlock(&sas_port->phy_list_lock);

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else if (!port->port_attached) {
			port->id = 0xff;
		}
	}
}

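/*
 * Compare the phy state before and after a controller reset (@state is
 * the pre-reset snapshot) and notify libsas of the differences: kick
 * expander ports for rediscovery, report lost phys as down, and issue a
 * link reset for phys that were up before but failed to come back.
 */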
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba);
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (new_state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_notify_port_event(sas_phy,
							      PORTE_BROADCAST_RCVD,
							      GFP_KERNEL);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);

			/*
			 * The new state is not ready but the old state was
			 * ready; there are two possible causes:
			 * 1. The connected device was removed
			 * 2. The device exists but phyup timed out
			 */
			if (state & BIT(phy_no))
				hisi_sas_notify_phy_event(phy,
							  HISI_PHYE_LINK_RESET);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	struct ata_link *link;
	u8 fis[FIS_BUF_SIZE] = {0};
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		if (!(sas_port->phy_mask & BIT(i)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, i);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					i, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba,
						 WAIT_CMD_COMPLETE_DELAY,
						 WAIT_CMD_COMPLETE_TMROUT);

	/*
	 * hisi_hba->timer is only used for v1/v2 hw, and check hw->sht
	 * which is also only used for v1/v2 hw to skip it for v3 hw
	 */
	if (hisi_hba->hw->sht)
		timer_delete_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie)
{
	struct hisi_sas_phy *phy = data;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = phy->sas_phy.id;

	phy->reset_completion = &completion;
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
	if (!wait_for_completion_timeout(&completion,
					 HISI_SAS_WAIT_PHYUP_TIMEOUT))
		dev_warn(dev, "phy%d wait phyup timed out\n", phy_no);

	phy->reset_completion = NULL;
}

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int phy_no;

	/* Init and wait for PHYs to come up and all libsas events to finish. */
	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (!sas_phy->phy->enabled)
			continue;

		if (!(hisi_hba->phy_state & BIT(phy_no))) {
			hisi_sas_phy_enable(hisi_hba, phy_no, 1);
			continue;
		}

		async_schedule_domain(hisi_sas_async_init_wait_phyup,
				      phy, &async);
	}

	async_synchronize_full_domain(&async);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
	if (!hisi_hba->hw->soft_reset)
		return -ENOENT;

	down(&hisi_hba->sem);
	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		up(&hisi_hba->sem);
		return -EPERM;
	}

	if (hisi_sas_debugfs_enable)
		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);

	return 0;
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
		return rc;
	}
	clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

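/*
 * libsas abort-task hook: for SSP pair the TMF with an internal abort of
 * the IPTT; for SATA/STP abort all IO on the device and soft-reset the
 * disk unless ATA EH is recovering an NCQ error; for SMP just issue an
 * internal abort for the slot.
 */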
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct hisi_sas_internal_abort_data internal_abort_data = { false };
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_slot *slot = task->lldd_task;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq or poll queue to avoid freeing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			hisi_sas_sync_cq(cq);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!slot)
		goto out;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		u16 tag = slot->idx;
		int rc2;

		rc = sas_abort_task(task, tag);
		rc2 = sas_execute_internal_abort_single(device, tag,
							slot->dlvry_queue,
							&internal_abort_data);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot, true);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			struct ata_queued_cmd *qc = task->uldd_task;

			rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);

			/*
			 * If an ATA internal command times out in ATA EH, it
			 * needs to execute a soft reset, so check the scsicmd
			 */
			if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) &&
			    qc && qc->scsicmd) {
				hisi_sas_do_release_task(hisi_hba, task, slot, true);
				rc = TMF_RESP_FUNC_COMPLETE;
			} else {
				rc = hisi_sas_softreset_ata_disk(device);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = sas_execute_internal_abort_single(device,
						       tag, slot->dlvry_queue,
						       &internal_abort_data);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
		    task->lldd_task) {
			/*
			 * sync irq or poll queue to avoid freeing task
			 * before using task in IO completion
			 */
			hisi_sas_sync_cq(cq);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = sas_abort_task_set(device, lun);
	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 1;
		spin_unlock_irqrestore(&phy->lock, flags);
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (rc == -ETIMEDOUT)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
		return rc;
	}

	/* Remote phy */
	if (rc)
		return rc;

	if (dev_is_sata(device)) {
		struct ata_link *link = &device->sata_dev.ap->link;

		rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
					  smp_ata_check_ready_type);
	} else {
		msleep(DELAY_FOR_LINK_READY);
	}

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR)
		sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc == TMF_RESP_FUNC_FAILED)
			dev_err(dev, "ata disk %016llx reset (%d)\n",
				SAS_ADDR(device->sas_addr), rc);
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);
		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		rc = sas_lu_reset(device, lun);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]: rc=%d\n",
			sas_dev->device_id, rc);
	return rc;
}

static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
{
	struct domain_device *device = data;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	int rc;

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
			 SAS_ADDR(device->sas_addr), rc);
}

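/*
 * libsas clear-nexus-HA hook: reset the controller via rst_work, then
 * run an I_T nexus reset for every non-expander device in parallel, and
 * finally release all outstanding slots.
 */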
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
				      device, &async);
	}

	async_synchronize_full_domain(&async);
	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = sas_query_task(task, tag);
		switch (rc) {
		/* The task is still in the LUN; release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed; reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
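/*
 * Called by libsas when an internal abort request itself times out.
 * A debugfs register snapshot is taken first; the semaphore is skipped
 * when rst_ha_timeout is set, since the caller may already hold it (see
 * the comment below). If the task has not completed, the matching
 * completion queue is synced so the slot cannot complete behind our back,
 * and a controller reset is queued when rst_ha_timeout is set. Returns
 * true when the task had not completed and was cleaned up here.
 */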
static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
					    void *data)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_internal_abort_data *timeout = data;

	if (hisi_sas_debugfs_enable) {
		/*
		 * If the timeout occurs in a device-gone scenario, do not
		 * take the semaphore, to avoid a circular dependency like:
		 * hisi_sas_dev_gone() -> down() -> ... ->
		 * hisi_sas_internal_abort_timeout() -> down().
		 */
		if (!timeout->rst_ha_timeout)
			down(&hisi_hba->sem);
		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
		if (!timeout->rst_ha_timeout)
			up(&hisi_hba->sem);
	}

	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		pr_err("Internal abort: timeout %016llx\n",
		       SAS_ADDR(device->sas_addr));
	} else {
		struct hisi_sas_slot *slot = task->lldd_task;

		set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

		if (slot) {
			struct hisi_sas_cq *cq =
				&hisi_hba->cq[slot->dlvry_queue];
			/*
			 * Sync irq or poll queue to avoid freeing the task
			 * before it is used in IO completion.
			 */
			hisi_sas_sync_cq(cq);
			slot->task = NULL;
		}

		if (timeout->rst_ha_timeout) {
			pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
			       SAS_ADDR(device->sas_addr));
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		} else {
			pr_err("Internal abort: timeout and not done %016llx.\n",
			       SAS_ADDR(device->sas_addr));
		}

		return true;
	}

	return false;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
		       gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
		return;

	sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
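/* Domain function hooks registered with libsas by this LLDD */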
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
	.lldd_tmf_aborted	= hisi_sas_tmf_aborted,
	.lldd_abort_timeout	= hisi_sas_internal_abort_timeout,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);

		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
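/*
 * Allocate the delivery/completion queues, ITCT, IOST, breakpoint tables,
 * IPTT bitmap, initial FIS buffers and the ordered reset workqueue for the
 * HBA. Slot buffers are carved out of larger coherent blocks whose size is
 * the lcm of the rounded-up command count and slot-buffer size (at least
 * PAGE_SIZE), so each slot buffer lies wholly within one block. All
 * allocations here are device-managed except the workqueue, which
 * hisi_sas_free() destroys.
 */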
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;
		spin_lock_init(&cq->poll_lock);

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* Round up to avoid an overly large block size */
	max_command_entries_ru = roundup(max_command_entries,
					 BLK_CNT_OPTIMIZE_MARK);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);

	sz_slot_buf_ru = roundup(sz_slot_buf_ru, BLK_CNT_OPTIMIZE_MARK);
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	s = hisi_hba->slot_index_count = max_command_entries;
	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_hba->last_slot_index = 0;

	hisi_hba->wq =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		timer_delete_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	if (hisi_sas_controller_prereset(hisi_hba))
		return;

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
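/*
 * Synchronous flavour of the reset work, used with
 * HISI_SAS_DECLARE_RST_WORK_ON_STACK() (see hisi_sas_clear_nexus_ha()):
 * it records success in rst->done and signals the caller's completion.
 */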
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (hisi_sas_controller_prereset(rst->hisi_hba))
		goto rst_complete;

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
rst_complete:
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for a platform
		 * device-based controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) /
						 HZ_TO_MHZ;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
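/*
 * For reference, an illustrative (not board-accurate) DT node carrying the
 * firmware properties consumed above might look like:
 *
 *	sas@c1000000 {
 *		compatible = "hisilicon,hip05-sas-v1";
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		hisilicon,sas-syscon = <&pcie_sas>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *		phy-count = <8>;
 *		queue-count = <32>;
 *	};
 *
 * The unit address and register offsets here are placeholders only; the
 * authoritative values live in the board dtsi and DT bindings.
 */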
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (hisi_hba->hw->fw_info_check) {
		if (hisi_hba->hw->fw_info_check(hisi_hba))
			goto err_out;
	}

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->hw->interrupt_preinit)
		return hisi_hba->hw->interrupt_preinit(hisi_hba);
	return 0;
}

int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = hisi_sas_interrupt_preinit(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_hw_init;

	scsi_scan_host(shost);

	return 0;

err_out_hw_init:
	sas_unregister_ha(sha);
err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

void hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->shost;

	timer_delete_sync(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
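/*
 * Debugfs support is always built in; the Kconfig option below only
 * selects whether it starts enabled and how many register dumps are kept.
 * Both module parameters are read-only (0444), so the choice is fixed at
 * module load time.
 */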
#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
#define DEBUGFS_ENABLE_DEFAULT "enabled"
bool hisi_sas_debugfs_enable = true;
u32 hisi_sas_debugfs_dump_count = 50;
#else
#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
u32 hisi_sas_debugfs_dump_count = 1;
#endif

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable,
		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	if (hisi_sas_debugfs_enable)
		debugfs_remove(hisi_sas_debugfs_dir);

	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);