1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 15 void *funcdata); 16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 17 struct domain_device *device); 18 static void hisi_sas_dev_gone(struct domain_device *device); 19 20 struct hisi_sas_internal_abort_data { 21 bool rst_ha_timeout; /* reset the HA for timeout */ 22 }; 23 24 static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc) 25 { 26 if (!qc) 27 return HISI_SAS_SATA_PROTOCOL_PIO; 28 29 switch (qc->tf.protocol) { 30 case ATA_PROT_NODATA: 31 return HISI_SAS_SATA_PROTOCOL_NONDATA; 32 case ATA_PROT_PIO: 33 return HISI_SAS_SATA_PROTOCOL_PIO; 34 case ATA_PROT_DMA: 35 return HISI_SAS_SATA_PROTOCOL_DMA; 36 case ATA_PROT_NCQ_NODATA: 37 case ATA_PROT_NCQ: 38 return HISI_SAS_SATA_PROTOCOL_FPDMA; 39 default: 40 return HISI_SAS_SATA_PROTOCOL_PIO; 41 } 42 } 43 44 u8 hisi_sas_get_ata_protocol(struct sas_task *task) 45 { 46 struct host_to_dev_fis *fis = &task->ata_task.fis; 47 struct ata_queued_cmd *qc = task->uldd_task; 48 int direction = task->data_dir; 49 50 switch (fis->command) { 51 case ATA_CMD_FPDMA_WRITE: 52 case ATA_CMD_FPDMA_READ: 53 case ATA_CMD_FPDMA_RECV: 54 case ATA_CMD_FPDMA_SEND: 55 case ATA_CMD_NCQ_NON_DATA: 56 return HISI_SAS_SATA_PROTOCOL_FPDMA; 57 58 case ATA_CMD_DOWNLOAD_MICRO: 59 case ATA_CMD_ID_ATA: 60 case ATA_CMD_PMP_READ: 61 case ATA_CMD_READ_LOG_EXT: 62 case ATA_CMD_PIO_READ: 63 case ATA_CMD_PIO_READ_EXT: 64 case ATA_CMD_PMP_WRITE: 65 case ATA_CMD_WRITE_LOG_EXT: 66 case ATA_CMD_PIO_WRITE: 67 case ATA_CMD_PIO_WRITE_EXT: 68 return HISI_SAS_SATA_PROTOCOL_PIO; 69 70 case ATA_CMD_DSM: 71 case ATA_CMD_DOWNLOAD_MICRO_DMA: 72 case ATA_CMD_PMP_READ_DMA: 73 case ATA_CMD_PMP_WRITE_DMA: 74 case ATA_CMD_READ: 75 case ATA_CMD_READ_EXT: 76 case ATA_CMD_READ_LOG_DMA_EXT: 77 case ATA_CMD_READ_STREAM_DMA_EXT: 78 case ATA_CMD_TRUSTED_RCV_DMA: 79 case ATA_CMD_TRUSTED_SND_DMA: 80 case ATA_CMD_WRITE: 81 case ATA_CMD_WRITE_EXT: 82 case ATA_CMD_WRITE_FUA_EXT: 83 case ATA_CMD_WRITE_QUEUED: 84 case ATA_CMD_WRITE_LOG_DMA_EXT: 85 case ATA_CMD_WRITE_STREAM_DMA_EXT: 86 case ATA_CMD_ZAC_MGMT_IN: 87 return HISI_SAS_SATA_PROTOCOL_DMA; 88 89 case ATA_CMD_CHK_POWER: 90 case ATA_CMD_DEV_RESET: 91 case ATA_CMD_EDD: 92 case ATA_CMD_FLUSH: 93 case ATA_CMD_FLUSH_EXT: 94 case ATA_CMD_VERIFY: 95 case ATA_CMD_VERIFY_EXT: 96 case ATA_CMD_SET_FEATURES: 97 case ATA_CMD_STANDBY: 98 case ATA_CMD_STANDBYNOW1: 99 case ATA_CMD_ZAC_MGMT_OUT: 100 return HISI_SAS_SATA_PROTOCOL_NONDATA; 101 102 case ATA_CMD_SET_MAX: 103 switch (fis->features) { 104 case ATA_SET_MAX_PASSWD: 105 case ATA_SET_MAX_LOCK: 106 return HISI_SAS_SATA_PROTOCOL_PIO; 107 108 case ATA_SET_MAX_PASSWD_DMA: 109 case ATA_SET_MAX_UNLOCK_DMA: 110 return HISI_SAS_SATA_PROTOCOL_DMA; 111 112 default: 113 return HISI_SAS_SATA_PROTOCOL_NONDATA; 114 } 115 116 default: 117 { 118 if (direction == DMA_NONE) 119 return HISI_SAS_SATA_PROTOCOL_NONDATA; 120 return hisi_sas_get_ata_protocol_from_tf(qc); 121 } 122 } 123 } 124 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 125 126 void hisi_sas_sata_done(struct sas_task *task, 127 struct hisi_sas_slot *slot) 128 { 129 struct task_status_struct *ts = &task->task_status; 130 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 131 struct hisi_sas_status_buffer *status_buf = 132 hisi_sas_status_buf_addr_mem(slot); 133 u8 *iu = &status_buf->iu[0]; 134 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 135 136 resp->frame_len = sizeof(struct dev_to_host_fis); 137 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 138 139 ts->buf_valid_size = sizeof(*resp); 140 } 141 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 142 143 /* 144 * This function assumes linkrate mask fits in 8 bits, which it 145 * does for all HW versions supported. 146 */ 147 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 148 { 149 u8 rate = 0; 150 int i; 151 152 max -= SAS_LINK_RATE_1_5_GBPS; 153 for (i = 0; i <= max; i++) 154 rate |= 1 << (i * 2); 155 return rate; 156 } 157 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 158 159 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 160 { 161 return device->port->ha->lldd_ha; 162 } 163 164 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 165 { 166 return container_of(sas_port, struct hisi_sas_port, sas_port); 167 } 168 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 169 170 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 171 { 172 int phy_no; 173 174 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 175 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 176 } 177 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 178 179 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 180 { 181 void *bitmap = hisi_hba->slot_index_tags; 182 183 __clear_bit(slot_idx, bitmap); 184 } 185 186 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 187 { 188 if (hisi_hba->hw->slot_index_alloc || 189 slot_idx < HISI_SAS_RESERVED_IPTT) { 190 spin_lock(&hisi_hba->lock); 191 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 192 spin_unlock(&hisi_hba->lock); 193 } 194 } 195 196 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 197 { 198 void *bitmap = hisi_hba->slot_index_tags; 199 200 __set_bit(slot_idx, bitmap); 201 } 202 203 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 204 struct request *rq) 205 { 206 int index; 207 void *bitmap = hisi_hba->slot_index_tags; 208 209 if (rq) 210 return rq->tag + HISI_SAS_RESERVED_IPTT; 211 212 spin_lock(&hisi_hba->lock); 213 index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, 214 hisi_hba->last_slot_index + 1); 215 if (index >= HISI_SAS_RESERVED_IPTT) { 216 index = find_next_zero_bit(bitmap, 217 HISI_SAS_RESERVED_IPTT, 218 0); 219 if (index >= HISI_SAS_RESERVED_IPTT) { 220 spin_unlock(&hisi_hba->lock); 221 return -SAS_QUEUE_FULL; 222 } 223 } 224 hisi_sas_slot_index_set(hisi_hba, index); 225 hisi_hba->last_slot_index = index; 226 spin_unlock(&hisi_hba->lock); 227 228 return index; 229 } 230 231 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 232 struct hisi_sas_slot *slot, bool need_lock) 233 { 234 int device_id = slot->device_id; 235 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 236 237 if (task) { 238 struct device *dev = hisi_hba->dev; 239 240 if (!task->lldd_task) 241 return; 242 243 task->lldd_task = NULL; 244 245 if (!sas_protocol_ata(task->task_proto)) { 246 if (slot->n_elem) { 247 if (task->task_proto & SAS_PROTOCOL_SSP) 248 dma_unmap_sg(dev, task->scatter, 249 task->num_scatter, 250 task->data_dir); 251 else 252 dma_unmap_sg(dev, &task->smp_task.smp_req, 253 1, DMA_TO_DEVICE); 254 } 255 if (slot->n_elem_dif) { 256 struct sas_ssp_task *ssp_task = &task->ssp_task; 257 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 258 259 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 260 scsi_prot_sg_count(scsi_cmnd), 261 task->data_dir); 262 } 263 } 264 } 265 266 if (need_lock) { 267 spin_lock(&sas_dev->lock); 268 list_del_init(&slot->entry); 269 spin_unlock(&sas_dev->lock); 270 } else { 271 list_del_init(&slot->entry); 272 } 273 274 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 275 276 hisi_sas_slot_index_free(hisi_hba, slot->idx); 277 } 278 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 279 280 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 281 struct hisi_sas_slot *slot) 282 { 283 hisi_hba->hw->prep_smp(hisi_hba, slot); 284 } 285 286 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 287 struct hisi_sas_slot *slot) 288 { 289 hisi_hba->hw->prep_ssp(hisi_hba, slot); 290 } 291 292 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 293 struct hisi_sas_slot *slot) 294 { 295 hisi_hba->hw->prep_stp(hisi_hba, slot); 296 } 297 298 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 299 struct hisi_sas_slot *slot) 300 { 301 hisi_hba->hw->prep_abort(hisi_hba, slot); 302 } 303 304 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 305 struct sas_task *task, int n_elem) 306 { 307 struct device *dev = hisi_hba->dev; 308 309 if (!sas_protocol_ata(task->task_proto) && n_elem) { 310 if (task->num_scatter) { 311 dma_unmap_sg(dev, task->scatter, task->num_scatter, 312 task->data_dir); 313 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 314 dma_unmap_sg(dev, &task->smp_task.smp_req, 315 1, DMA_TO_DEVICE); 316 } 317 } 318 } 319 320 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 321 struct sas_task *task, int *n_elem) 322 { 323 struct device *dev = hisi_hba->dev; 324 int rc; 325 326 if (sas_protocol_ata(task->task_proto)) { 327 *n_elem = task->num_scatter; 328 } else { 329 unsigned int req_len; 330 331 if (task->num_scatter) { 332 *n_elem = dma_map_sg(dev, task->scatter, 333 task->num_scatter, task->data_dir); 334 if (!*n_elem) { 335 rc = -ENOMEM; 336 goto prep_out; 337 } 338 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 339 *n_elem = dma_map_sg(dev, &task->smp_task.smp_req, 340 1, DMA_TO_DEVICE); 341 if (!*n_elem) { 342 rc = -ENOMEM; 343 goto prep_out; 344 } 345 req_len = sg_dma_len(&task->smp_task.smp_req); 346 if (req_len & 0x3) { 347 rc = -EINVAL; 348 goto err_out_dma_unmap; 349 } 350 } 351 } 352 353 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 354 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 355 *n_elem); 356 rc = -EINVAL; 357 goto err_out_dma_unmap; 358 } 359 return 0; 360 361 err_out_dma_unmap: 362 /* It would be better to call dma_unmap_sg() here, but it's messy */ 363 hisi_sas_dma_unmap(hisi_hba, task, *n_elem); 364 prep_out: 365 return rc; 366 } 367 368 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 369 struct sas_task *task, int n_elem_dif) 370 { 371 struct device *dev = hisi_hba->dev; 372 373 if (n_elem_dif) { 374 struct sas_ssp_task *ssp_task = &task->ssp_task; 375 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 376 377 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 378 scsi_prot_sg_count(scsi_cmnd), 379 task->data_dir); 380 } 381 } 382 383 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 384 int *n_elem_dif, struct sas_task *task) 385 { 386 struct device *dev = hisi_hba->dev; 387 struct sas_ssp_task *ssp_task; 388 struct scsi_cmnd *scsi_cmnd; 389 int rc; 390 391 if (task->num_scatter) { 392 ssp_task = &task->ssp_task; 393 scsi_cmnd = ssp_task->cmd; 394 395 if (scsi_prot_sg_count(scsi_cmnd)) { 396 *n_elem_dif = dma_map_sg(dev, 397 scsi_prot_sglist(scsi_cmnd), 398 scsi_prot_sg_count(scsi_cmnd), 399 task->data_dir); 400 401 if (!*n_elem_dif) 402 return -ENOMEM; 403 404 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 405 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 406 *n_elem_dif); 407 rc = -EINVAL; 408 goto err_out_dif_dma_unmap; 409 } 410 } 411 } 412 413 return 0; 414 415 err_out_dif_dma_unmap: 416 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 417 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 418 return rc; 419 } 420 421 static 422 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, 423 struct hisi_sas_slot *slot, 424 struct hisi_sas_dq *dq, 425 struct hisi_sas_device *sas_dev) 426 { 427 struct hisi_sas_cmd_hdr *cmd_hdr_base; 428 int dlvry_queue_slot, dlvry_queue; 429 struct sas_task *task = slot->task; 430 int wr_q_index; 431 432 spin_lock(&dq->lock); 433 wr_q_index = dq->wr_point; 434 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 435 list_add_tail(&slot->delivery, &dq->list); 436 spin_unlock(&dq->lock); 437 spin_lock(&sas_dev->lock); 438 list_add_tail(&slot->entry, &sas_dev->list); 439 spin_unlock(&sas_dev->lock); 440 441 dlvry_queue = dq->id; 442 dlvry_queue_slot = wr_q_index; 443 444 slot->device_id = sas_dev->device_id; 445 slot->dlvry_queue = dlvry_queue; 446 slot->dlvry_queue_slot = dlvry_queue_slot; 447 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 448 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 449 450 task->lldd_task = slot; 451 452 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 453 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 454 memset(hisi_sas_status_buf_addr_mem(slot), 0, 455 sizeof(struct hisi_sas_err_record)); 456 457 switch (task->task_proto) { 458 case SAS_PROTOCOL_SMP: 459 hisi_sas_task_prep_smp(hisi_hba, slot); 460 break; 461 case SAS_PROTOCOL_SSP: 462 hisi_sas_task_prep_ssp(hisi_hba, slot); 463 break; 464 case SAS_PROTOCOL_SATA: 465 case SAS_PROTOCOL_STP: 466 case SAS_PROTOCOL_STP_ALL: 467 hisi_sas_task_prep_ata(hisi_hba, slot); 468 break; 469 case SAS_PROTOCOL_INTERNAL_ABORT: 470 hisi_sas_task_prep_abort(hisi_hba, slot); 471 break; 472 default: 473 return; 474 } 475 476 /* Make slot memories observable before marking as ready */ 477 smp_wmb(); 478 WRITE_ONCE(slot->ready, 1); 479 480 spin_lock(&dq->lock); 481 hisi_hba->hw->start_delivery(dq); 482 spin_unlock(&dq->lock); 483 } 484 485 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 486 { 487 int n_elem = 0, n_elem_dif = 0; 488 struct domain_device *device = task->dev; 489 struct asd_sas_port *sas_port = device->port; 490 struct hisi_sas_device *sas_dev = device->lldd_dev; 491 bool internal_abort = sas_is_internal_abort(task); 492 struct hisi_sas_dq *dq = NULL; 493 struct hisi_sas_port *port; 494 struct hisi_hba *hisi_hba; 495 struct hisi_sas_slot *slot; 496 struct request *rq = NULL; 497 struct device *dev; 498 int rc; 499 500 if (!sas_port) { 501 struct task_status_struct *ts = &task->task_status; 502 503 ts->resp = SAS_TASK_UNDELIVERED; 504 ts->stat = SAS_PHY_DOWN; 505 /* 506 * libsas will use dev->port, should 507 * not call task_done for sata 508 */ 509 if (device->dev_type != SAS_SATA_DEV && !internal_abort) 510 task->task_done(task); 511 return -ECOMM; 512 } 513 514 hisi_hba = dev_to_hisi_hba(device); 515 dev = hisi_hba->dev; 516 517 switch (task->task_proto) { 518 case SAS_PROTOCOL_SSP: 519 case SAS_PROTOCOL_SMP: 520 case SAS_PROTOCOL_SATA: 521 case SAS_PROTOCOL_STP: 522 case SAS_PROTOCOL_STP_ALL: 523 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 524 if (!gfpflags_allow_blocking(gfp_flags)) 525 return -EINVAL; 526 527 down(&hisi_hba->sem); 528 up(&hisi_hba->sem); 529 } 530 531 if (DEV_IS_GONE(sas_dev)) { 532 if (sas_dev) 533 dev_info(dev, "task prep: device %d not ready\n", 534 sas_dev->device_id); 535 else 536 dev_info(dev, "task prep: device %016llx not ready\n", 537 SAS_ADDR(device->sas_addr)); 538 539 return -ECOMM; 540 } 541 542 port = to_hisi_sas_port(sas_port); 543 if (!port->port_attached) { 544 dev_info(dev, "task prep: %s port%d not attach device\n", 545 dev_is_sata(device) ? "SATA/STP" : "SAS", 546 device->port->id); 547 548 return -ECOMM; 549 } 550 551 rq = sas_task_find_rq(task); 552 if (rq) { 553 unsigned int dq_index; 554 u32 blk_tag; 555 556 blk_tag = blk_mq_unique_tag(rq); 557 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 558 dq = &hisi_hba->dq[dq_index]; 559 } else { 560 int queue; 561 562 if (hisi_hba->iopoll_q_cnt) { 563 /* 564 * Use interrupt queue (queue 0) to deliver and complete 565 * internal IOs of libsas or libata when there is at least 566 * one iopoll queue 567 */ 568 queue = 0; 569 } else { 570 struct Scsi_Host *shost = hisi_hba->shost; 571 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 572 573 queue = qmap->mq_map[raw_smp_processor_id()]; 574 } 575 dq = &hisi_hba->dq[queue]; 576 } 577 break; 578 case SAS_PROTOCOL_INTERNAL_ABORT: 579 if (!hisi_hba->hw->prep_abort) 580 return TMF_RESP_FUNC_FAILED; 581 582 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 583 return -EIO; 584 585 hisi_hba = dev_to_hisi_hba(device); 586 587 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 588 return -EINVAL; 589 590 port = to_hisi_sas_port(sas_port); 591 dq = &hisi_hba->dq[task->abort_task.qid]; 592 break; 593 default: 594 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", 595 task->task_proto); 596 return -EINVAL; 597 } 598 599 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem); 600 if (rc < 0) 601 goto prep_out; 602 603 if (!sas_protocol_ata(task->task_proto)) { 604 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 605 if (rc < 0) 606 goto err_out_dma_unmap; 607 } 608 609 if (!internal_abort && hisi_hba->hw->slot_index_alloc) 610 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 611 else 612 rc = hisi_sas_slot_index_alloc(hisi_hba, rq); 613 614 if (rc < 0) 615 goto err_out_dif_dma_unmap; 616 617 slot = &hisi_hba->slot_info[rc]; 618 slot->n_elem = n_elem; 619 slot->n_elem_dif = n_elem_dif; 620 slot->task = task; 621 slot->port = port; 622 623 slot->tmf = task->tmf; 624 slot->is_internal = !!task->tmf || internal_abort; 625 626 /* protect task_prep and start_delivery sequence */ 627 hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); 628 629 return 0; 630 631 err_out_dif_dma_unmap: 632 if (!sas_protocol_ata(task->task_proto)) 633 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 634 err_out_dma_unmap: 635 hisi_sas_dma_unmap(hisi_hba, task, n_elem); 636 prep_out: 637 dev_err(dev, "task exec: failed[%d]!\n", rc); 638 return rc; 639 } 640 641 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 642 gfp_t gfp_flags) 643 { 644 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 645 struct asd_sas_phy *sas_phy = &phy->sas_phy; 646 647 if (!phy->phy_attached) 648 return; 649 650 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 651 652 if (sas_phy->phy) { 653 struct sas_phy *sphy = sas_phy->phy; 654 655 sphy->negotiated_linkrate = sas_phy->linkrate; 656 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 657 sphy->maximum_linkrate_hw = 658 hisi_hba->hw->phy_get_max_linkrate(); 659 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 660 sphy->minimum_linkrate = phy->minimum_linkrate; 661 662 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 663 sphy->maximum_linkrate = phy->maximum_linkrate; 664 } 665 666 if (phy->phy_type & PORT_TYPE_SAS) { 667 struct sas_identify_frame *id; 668 669 id = (struct sas_identify_frame *)phy->frame_rcvd; 670 id->dev_type = phy->identify.device_type; 671 id->initiator_bits = SAS_PROTOCOL_ALL; 672 id->target_bits = phy->identify.target_port_protocols; 673 } else if (phy->phy_type & PORT_TYPE_SATA) { 674 /* Nothing */ 675 } 676 677 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 678 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 679 } 680 681 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 682 { 683 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 684 struct hisi_sas_device *sas_dev = NULL; 685 int last = hisi_hba->last_dev_id; 686 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 687 int i; 688 689 spin_lock(&hisi_hba->lock); 690 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 691 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 692 int queue = i % hisi_hba->queue_count; 693 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 694 695 hisi_hba->devices[i].device_id = i; 696 sas_dev = &hisi_hba->devices[i]; 697 sas_dev->dev_status = HISI_SAS_DEV_INIT; 698 sas_dev->dev_type = device->dev_type; 699 sas_dev->hisi_hba = hisi_hba; 700 sas_dev->sas_device = device; 701 sas_dev->dq = dq; 702 spin_lock_init(&sas_dev->lock); 703 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 704 break; 705 } 706 i++; 707 } 708 hisi_hba->last_dev_id = i; 709 spin_unlock(&hisi_hba->lock); 710 711 return sas_dev; 712 } 713 714 static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq) 715 { 716 /* make sure CQ entries being processed are processed to completion */ 717 spin_lock(&cq->poll_lock); 718 spin_unlock(&cq->poll_lock); 719 } 720 721 static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq) 722 { 723 struct hisi_hba *hisi_hba = cq->hisi_hba; 724 725 if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt) 726 return false; 727 return true; 728 } 729 730 static void hisi_sas_sync_cq(struct hisi_sas_cq *cq) 731 { 732 if (hisi_sas_queue_is_poll(cq)) 733 hisi_sas_sync_poll_cq(cq); 734 else 735 synchronize_irq(cq->irq_no); 736 } 737 738 void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba) 739 { 740 int i; 741 742 for (i = 0; i < hisi_hba->queue_count; i++) { 743 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 744 745 if (hisi_sas_queue_is_poll(cq)) 746 hisi_sas_sync_poll_cq(cq); 747 } 748 } 749 EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs); 750 751 void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba) 752 { 753 int i; 754 755 for (i = 0; i < hisi_hba->queue_count; i++) { 756 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 757 758 hisi_sas_sync_cq(cq); 759 } 760 } 761 EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs); 762 763 static void hisi_sas_tmf_aborted(struct sas_task *task) 764 { 765 struct hisi_sas_slot *slot = task->lldd_task; 766 struct domain_device *device = task->dev; 767 struct hisi_sas_device *sas_dev = device->lldd_dev; 768 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 769 770 if (slot) { 771 struct hisi_sas_cq *cq = 772 &hisi_hba->cq[slot->dlvry_queue]; 773 /* 774 * sync irq or poll queue to avoid free'ing task 775 * before using task in IO completion 776 */ 777 hisi_sas_sync_cq(cq); 778 slot->task = NULL; 779 } 780 } 781 782 #define HISI_SAS_DISK_RECOVER_CNT 3 783 static int hisi_sas_init_device(struct domain_device *device) 784 { 785 int rc = TMF_RESP_FUNC_COMPLETE; 786 struct scsi_lun lun; 787 int retry = HISI_SAS_DISK_RECOVER_CNT; 788 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 789 790 switch (device->dev_type) { 791 case SAS_END_DEVICE: 792 int_to_scsilun(0, &lun); 793 794 while (retry-- > 0) { 795 rc = sas_abort_task_set(device, lun.scsi_lun); 796 if (rc == TMF_RESP_FUNC_COMPLETE) { 797 hisi_sas_release_task(hisi_hba, device); 798 break; 799 } 800 } 801 break; 802 case SAS_SATA_DEV: 803 case SAS_SATA_PM: 804 case SAS_SATA_PM_PORT: 805 case SAS_SATA_PENDING: 806 /* 807 * If an expander is swapped when a SATA disk is attached then 808 * we should issue a hard reset to clear previous affiliation 809 * of STP target port, see SPL (chapter 6.19.4). 810 * 811 * However we don't need to issue a hard reset here for these 812 * reasons: 813 * a. When probing the device, libsas/libata already issues a 814 * hard reset in sas_probe_sata() -> ata_port_probe(). 815 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care 816 * to issue a hard reset by checking the dev status (== INIT). 817 * b. When resetting the controller, this is simply unnecessary. 818 */ 819 while (retry-- > 0) { 820 rc = hisi_sas_softreset_ata_disk(device); 821 if (!rc) 822 break; 823 } 824 break; 825 default: 826 break; 827 } 828 829 return rc; 830 } 831 832 int hisi_sas_sdev_init(struct scsi_device *sdev) 833 { 834 struct domain_device *ddev = sdev_to_domain_dev(sdev); 835 struct hisi_sas_device *sas_dev = ddev->lldd_dev; 836 int rc; 837 838 rc = sas_sdev_init(sdev); 839 if (rc) 840 return rc; 841 842 rc = hisi_sas_init_device(ddev); 843 if (rc) 844 return rc; 845 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 846 return 0; 847 } 848 EXPORT_SYMBOL_GPL(hisi_sas_sdev_init); 849 850 static int hisi_sas_dev_found(struct domain_device *device) 851 { 852 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 853 struct domain_device *parent_dev = device->parent; 854 struct hisi_sas_device *sas_dev; 855 struct device *dev = hisi_hba->dev; 856 int rc; 857 858 if (hisi_hba->hw->alloc_dev) 859 sas_dev = hisi_hba->hw->alloc_dev(device); 860 else 861 sas_dev = hisi_sas_alloc_dev(device); 862 if (!sas_dev) { 863 dev_err(dev, "fail alloc dev: max support %d devices\n", 864 HISI_SAS_MAX_DEVICES); 865 return -EINVAL; 866 } 867 868 device->lldd_dev = sas_dev; 869 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 870 871 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 872 int phy_no; 873 874 phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device); 875 if (phy_no < 0) { 876 dev_info(dev, "dev found: no attached " 877 "dev:%016llx at ex:%016llx\n", 878 SAS_ADDR(device->sas_addr), 879 SAS_ADDR(parent_dev->sas_addr)); 880 rc = phy_no; 881 goto err_out; 882 } 883 } 884 885 dev_info(dev, "dev[%d:%x] found\n", 886 sas_dev->device_id, sas_dev->dev_type); 887 888 return 0; 889 890 err_out: 891 hisi_sas_dev_gone(device); 892 return rc; 893 } 894 895 int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) 896 { 897 struct domain_device *dev = sdev_to_domain_dev(sdev); 898 int ret = sas_sdev_configure(sdev, lim); 899 900 if (ret) 901 return ret; 902 if (!dev_is_sata(dev)) 903 sas_change_queue_depth(sdev, 64); 904 905 return 0; 906 } 907 EXPORT_SYMBOL_GPL(hisi_sas_sdev_configure); 908 909 void hisi_sas_scan_start(struct Scsi_Host *shost) 910 { 911 struct hisi_hba *hisi_hba = shost_priv(shost); 912 913 hisi_hba->hw->phys_init(hisi_hba); 914 } 915 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 916 917 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 918 { 919 struct hisi_hba *hisi_hba = shost_priv(shost); 920 struct sas_ha_struct *sha = &hisi_hba->sha; 921 922 /* Wait for PHY up interrupt to occur */ 923 if (time < HZ) 924 return 0; 925 926 sas_drain_work(sha); 927 return 1; 928 } 929 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 930 931 static void hisi_sas_phyup_work_common(struct work_struct *work, 932 enum hisi_sas_phy_event event) 933 { 934 struct hisi_sas_phy *phy = 935 container_of(work, typeof(*phy), works[event]); 936 struct hisi_hba *hisi_hba = phy->hisi_hba; 937 struct asd_sas_phy *sas_phy = &phy->sas_phy; 938 int phy_no = sas_phy->id; 939 940 phy->wait_phyup_cnt = 0; 941 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 942 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 943 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 944 } 945 946 static void hisi_sas_phyup_work(struct work_struct *work) 947 { 948 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); 949 } 950 951 static void hisi_sas_linkreset_work(struct work_struct *work) 952 { 953 struct hisi_sas_phy *phy = 954 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 955 struct asd_sas_phy *sas_phy = &phy->sas_phy; 956 957 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 958 } 959 960 static void hisi_sas_phyup_pm_work(struct work_struct *work) 961 { 962 struct hisi_sas_phy *phy = 963 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); 964 struct hisi_hba *hisi_hba = phy->hisi_hba; 965 struct device *dev = hisi_hba->dev; 966 967 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); 968 pm_runtime_put_sync(dev); 969 } 970 971 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 972 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 973 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 974 [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, 975 }; 976 977 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 978 enum hisi_sas_phy_event event) 979 { 980 struct hisi_hba *hisi_hba = phy->hisi_hba; 981 982 if (WARN_ON(event >= HISI_PHYES_NUM)) 983 return false; 984 985 return queue_work(hisi_hba->wq, &phy->works[event]); 986 } 987 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 988 989 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 990 { 991 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 992 struct hisi_hba *hisi_hba = phy->hisi_hba; 993 struct device *dev = hisi_hba->dev; 994 int phy_no = phy->sas_phy.id; 995 996 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 997 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 998 } 999 1000 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 1001 1002 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 1003 { 1004 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1005 struct device *dev = hisi_hba->dev; 1006 unsigned long flags; 1007 1008 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 1009 spin_lock_irqsave(&phy->lock, flags); 1010 if (phy->phy_attached) { 1011 spin_unlock_irqrestore(&phy->lock, flags); 1012 return; 1013 } 1014 1015 if (!timer_pending(&phy->timer)) { 1016 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 1017 phy->wait_phyup_cnt++; 1018 phy->timer.expires = jiffies + 1019 HISI_SAS_WAIT_PHYUP_TIMEOUT; 1020 add_timer(&phy->timer); 1021 spin_unlock_irqrestore(&phy->lock, flags); 1022 return; 1023 } 1024 1025 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 1026 phy_no, phy->wait_phyup_cnt); 1027 phy->wait_phyup_cnt = 0; 1028 } 1029 spin_unlock_irqrestore(&phy->lock, flags); 1030 } 1031 1032 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 1033 1034 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 1035 { 1036 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1037 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1038 int i; 1039 1040 phy->hisi_hba = hisi_hba; 1041 phy->port = NULL; 1042 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 1043 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 1044 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 1045 sas_phy->iproto = SAS_PROTOCOL_ALL; 1046 sas_phy->tproto = 0; 1047 sas_phy->role = PHY_ROLE_INITIATOR; 1048 sas_phy->oob_mode = OOB_NOT_CONNECTED; 1049 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 1050 sas_phy->id = phy_no; 1051 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 1052 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 1053 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 1054 sas_phy->lldd_phy = phy; 1055 1056 for (i = 0; i < HISI_PHYES_NUM; i++) 1057 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 1058 1059 spin_lock_init(&phy->lock); 1060 1061 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 1062 } 1063 1064 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 1065 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 1066 { 1067 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1068 struct asd_sas_phy *aphy = &phy->sas_phy; 1069 struct sas_phy *sphy = aphy->phy; 1070 unsigned long flags; 1071 1072 spin_lock_irqsave(&phy->lock, flags); 1073 1074 if (enable) { 1075 /* We may have been enabled already; if so, don't touch */ 1076 if (!phy->enable) 1077 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1078 hisi_hba->hw->phy_start(hisi_hba, phy_no); 1079 } else { 1080 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1081 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 1082 } 1083 phy->enable = enable; 1084 spin_unlock_irqrestore(&phy->lock, flags); 1085 } 1086 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 1087 1088 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 1089 { 1090 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1091 struct asd_sas_port *sas_port = sas_phy->port; 1092 struct hisi_sas_port *port; 1093 1094 if (!sas_port) 1095 return; 1096 1097 port = to_hisi_sas_port(sas_port); 1098 port->port_attached = 1; 1099 port->id = phy->port_id; 1100 phy->port = port; 1101 sas_port->lldd_port = port; 1102 } 1103 1104 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1105 struct hisi_sas_slot *slot, bool need_lock) 1106 { 1107 if (task) { 1108 unsigned long flags; 1109 struct task_status_struct *ts; 1110 1111 ts = &task->task_status; 1112 1113 ts->resp = SAS_TASK_COMPLETE; 1114 ts->stat = SAS_ABORTED_TASK; 1115 spin_lock_irqsave(&task->task_state_lock, flags); 1116 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 1117 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1118 task->task_state_flags |= SAS_TASK_STATE_DONE; 1119 spin_unlock_irqrestore(&task->task_state_lock, flags); 1120 } 1121 1122 hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock); 1123 } 1124 1125 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1126 struct domain_device *device) 1127 { 1128 struct hisi_sas_slot *slot, *slot2; 1129 struct hisi_sas_device *sas_dev = device->lldd_dev; 1130 1131 spin_lock(&sas_dev->lock); 1132 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1133 hisi_sas_do_release_task(hisi_hba, slot->task, slot, false); 1134 1135 spin_unlock(&sas_dev->lock); 1136 } 1137 1138 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1139 { 1140 struct hisi_sas_device *sas_dev; 1141 struct domain_device *device; 1142 int i; 1143 1144 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1145 sas_dev = &hisi_hba->devices[i]; 1146 device = sas_dev->sas_device; 1147 1148 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1149 !device) 1150 continue; 1151 1152 hisi_sas_release_task(hisi_hba, device); 1153 } 1154 } 1155 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1156 1157 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1158 struct domain_device *device) 1159 { 1160 if (hisi_hba->hw->dereg_device) 1161 hisi_hba->hw->dereg_device(hisi_hba, device); 1162 } 1163 1164 static int 1165 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, 1166 bool rst_ha_timeout) 1167 { 1168 struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; 1169 struct domain_device *device = sas_dev->sas_device; 1170 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1171 int i, rc; 1172 1173 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1174 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1175 const struct cpumask *mask = cq->irq_mask; 1176 1177 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 1178 continue; 1179 rc = sas_execute_internal_abort_dev(device, i, &data); 1180 if (rc) 1181 return rc; 1182 } 1183 1184 return 0; 1185 } 1186 1187 static void hisi_sas_dev_gone(struct domain_device *device) 1188 { 1189 struct hisi_sas_device *sas_dev = device->lldd_dev; 1190 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1191 struct device *dev = hisi_hba->dev; 1192 int ret = 0; 1193 1194 dev_info(dev, "dev[%d:%x] is gone\n", 1195 sas_dev->device_id, sas_dev->dev_type); 1196 1197 down(&hisi_hba->sem); 1198 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1199 hisi_sas_internal_task_abort_dev(sas_dev, true); 1200 1201 hisi_sas_dereg_device(hisi_hba, device); 1202 1203 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1204 device->lldd_dev = NULL; 1205 } 1206 1207 if (hisi_hba->hw->free_device) 1208 hisi_hba->hw->free_device(sas_dev); 1209 1210 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1211 if (!ret) 1212 sas_dev->dev_type = SAS_PHY_UNUSED; 1213 sas_dev->sas_device = NULL; 1214 up(&hisi_hba->sem); 1215 } 1216 1217 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1218 struct sas_phy_linkrates *r) 1219 { 1220 struct sas_phy_linkrates _r; 1221 1222 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1223 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1224 enum sas_linkrate min, max; 1225 1226 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1227 return -EINVAL; 1228 1229 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1230 max = sas_phy->phy->maximum_linkrate; 1231 min = r->minimum_linkrate; 1232 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1233 max = r->maximum_linkrate; 1234 min = sas_phy->phy->minimum_linkrate; 1235 } else 1236 return -EINVAL; 1237 1238 _r.maximum_linkrate = max; 1239 _r.minimum_linkrate = min; 1240 1241 sas_phy->phy->maximum_linkrate = max; 1242 sas_phy->phy->minimum_linkrate = min; 1243 1244 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1245 msleep(100); 1246 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1247 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1248 1249 return 0; 1250 } 1251 1252 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1253 void *funcdata) 1254 { 1255 struct hisi_sas_phy *phy = container_of(sas_phy, 1256 struct hisi_sas_phy, sas_phy); 1257 struct sas_ha_struct *sas_ha = sas_phy->ha; 1258 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1259 struct device *dev = hisi_hba->dev; 1260 DECLARE_COMPLETION_ONSTACK(completion); 1261 int phy_no = sas_phy->id; 1262 u8 sts = phy->phy_attached; 1263 int ret = 0; 1264 1265 down(&hisi_hba->sem); 1266 phy->reset_completion = &completion; 1267 1268 switch (func) { 1269 case PHY_FUNC_HARD_RESET: 1270 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1271 break; 1272 1273 case PHY_FUNC_LINK_RESET: 1274 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1275 msleep(100); 1276 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1277 break; 1278 1279 case PHY_FUNC_DISABLE: 1280 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1281 goto out; 1282 1283 case PHY_FUNC_SET_LINK_RATE: 1284 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1285 break; 1286 1287 case PHY_FUNC_GET_EVENTS: 1288 if (hisi_hba->hw->get_events) { 1289 hisi_hba->hw->get_events(hisi_hba, phy_no); 1290 goto out; 1291 } 1292 fallthrough; 1293 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1294 default: 1295 ret = -EOPNOTSUPP; 1296 goto out; 1297 } 1298 1299 if (sts && !wait_for_completion_timeout(&completion, 1300 HISI_SAS_WAIT_PHYUP_TIMEOUT)) { 1301 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1302 phy_no, func); 1303 if (phy->in_reset) 1304 ret = -ETIMEDOUT; 1305 } 1306 1307 out: 1308 phy->reset_completion = NULL; 1309 1310 up(&hisi_hba->sem); 1311 return ret; 1312 } 1313 1314 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1315 bool reset, int pmp, u8 *fis) 1316 { 1317 struct ata_taskfile tf; 1318 1319 ata_tf_init(dev, &tf); 1320 if (reset) 1321 tf.ctl |= ATA_SRST; 1322 else 1323 tf.ctl &= ~ATA_SRST; 1324 tf.command = ATA_CMD_DEV_RESET; 1325 ata_tf_to_fis(&tf, pmp, 0, fis); 1326 } 1327 1328 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1329 { 1330 u8 fis[20] = {0}; 1331 struct ata_port *ap = device->sata_dev.ap; 1332 struct ata_link *link; 1333 int rc = TMF_RESP_FUNC_FAILED; 1334 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1335 struct device *dev = hisi_hba->dev; 1336 1337 ata_for_each_link(link, ap, EDGE) { 1338 int pmp = sata_srst_pmp(link); 1339 1340 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1341 rc = sas_execute_ata_cmd(device, fis, -1); 1342 if (rc != TMF_RESP_FUNC_COMPLETE) 1343 break; 1344 } 1345 1346 if (rc == TMF_RESP_FUNC_COMPLETE) { 1347 usleep_range(900, 1000); 1348 ata_for_each_link(link, ap, EDGE) { 1349 int pmp = sata_srst_pmp(link); 1350 1351 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1352 rc = sas_execute_ata_cmd(device, fis, -1); 1353 if (rc != TMF_RESP_FUNC_COMPLETE) 1354 dev_err(dev, "ata disk %016llx de-reset failed\n", 1355 SAS_ADDR(device->sas_addr)); 1356 } 1357 } else { 1358 dev_err(dev, "ata disk %016llx reset failed\n", 1359 SAS_ADDR(device->sas_addr)); 1360 } 1361 1362 if (rc == TMF_RESP_FUNC_COMPLETE) 1363 hisi_sas_release_task(hisi_hba, device); 1364 1365 return rc; 1366 } 1367 1368 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1369 { 1370 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1371 int i; 1372 1373 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1374 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1375 struct domain_device *device = sas_dev->sas_device; 1376 struct asd_sas_port *sas_port; 1377 struct hisi_sas_port *port; 1378 struct hisi_sas_phy *phy = NULL; 1379 struct asd_sas_phy *sas_phy; 1380 1381 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1382 || !device || !device->port) 1383 continue; 1384 1385 sas_port = device->port; 1386 port = to_hisi_sas_port(sas_port); 1387 1388 spin_lock(&sas_port->phy_list_lock); 1389 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1390 if (state & BIT(sas_phy->id)) { 1391 phy = sas_phy->lldd_phy; 1392 break; 1393 } 1394 spin_unlock(&sas_port->phy_list_lock); 1395 1396 if (phy) { 1397 port->id = phy->port_id; 1398 1399 /* Update linkrate of directly attached device. */ 1400 if (!device->parent) 1401 device->linkrate = phy->sas_phy.linkrate; 1402 1403 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1404 } else if (!port->port_attached) 1405 port->id = 0xff; 1406 } 1407 } 1408 1409 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1410 { 1411 u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba); 1412 struct asd_sas_port *_sas_port = NULL; 1413 int phy_no; 1414 1415 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1416 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1417 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1418 struct asd_sas_port *sas_port = sas_phy->port; 1419 bool do_port_check = _sas_port != sas_port; 1420 1421 if (!sas_phy->phy->enabled) 1422 continue; 1423 1424 /* Report PHY state change to libsas */ 1425 if (new_state & BIT(phy_no)) { 1426 if (do_port_check && sas_port && sas_port->port_dev) { 1427 struct domain_device *dev = sas_port->port_dev; 1428 1429 _sas_port = sas_port; 1430 1431 if (dev_is_expander(dev->dev_type)) 1432 sas_notify_port_event(sas_phy, 1433 PORTE_BROADCAST_RCVD, 1434 GFP_KERNEL); 1435 } 1436 } else { 1437 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1438 1439 /* 1440 * The new_state is not ready but old_state is ready, 1441 * the two possible causes: 1442 * 1. The connected device is removed 1443 * 2. Device exists but phyup timed out 1444 */ 1445 if (state & BIT(phy_no)) 1446 hisi_sas_notify_phy_event(phy, 1447 HISI_PHYE_LINK_RESET); 1448 } 1449 } 1450 } 1451 1452 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1453 { 1454 struct hisi_sas_device *sas_dev; 1455 struct domain_device *device; 1456 int i; 1457 1458 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1459 sas_dev = &hisi_hba->devices[i]; 1460 device = sas_dev->sas_device; 1461 1462 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1463 continue; 1464 1465 hisi_sas_init_device(device); 1466 } 1467 } 1468 1469 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1470 struct asd_sas_port *sas_port, 1471 struct domain_device *device) 1472 { 1473 struct ata_port *ap = device->sata_dev.ap; 1474 struct device *dev = hisi_hba->dev; 1475 int rc = TMF_RESP_FUNC_FAILED; 1476 struct ata_link *link; 1477 u8 fis[20] = {0}; 1478 int i; 1479 1480 for (i = 0; i < hisi_hba->n_phy; i++) { 1481 if (!(sas_port->phy_mask & BIT(i))) 1482 continue; 1483 1484 ata_for_each_link(link, ap, EDGE) { 1485 int pmp = sata_srst_pmp(link); 1486 1487 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1488 rc = sas_execute_ata_cmd(device, fis, i); 1489 if (rc != TMF_RESP_FUNC_COMPLETE) { 1490 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1491 i, rc); 1492 break; 1493 } 1494 } 1495 } 1496 } 1497 1498 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1499 { 1500 struct device *dev = hisi_hba->dev; 1501 int port_no, rc, i; 1502 1503 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1504 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1505 struct domain_device *device = sas_dev->sas_device; 1506 1507 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1508 continue; 1509 1510 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1511 if (rc < 0) 1512 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1513 } 1514 1515 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1516 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1517 struct asd_sas_port *sas_port = &port->sas_port; 1518 struct domain_device *port_dev = sas_port->port_dev; 1519 struct domain_device *device; 1520 1521 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1522 continue; 1523 1524 /* Try to find a SATA device */ 1525 list_for_each_entry(device, &sas_port->dev_list, 1526 dev_list_node) { 1527 if (dev_is_sata(device)) { 1528 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1529 sas_port, 1530 device); 1531 break; 1532 } 1533 } 1534 } 1535 } 1536 1537 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1538 { 1539 struct Scsi_Host *shost = hisi_hba->shost; 1540 1541 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1542 1543 scsi_block_requests(shost); 1544 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1545 1546 /* 1547 * hisi_hba->timer is only used for v1/v2 hw, and check hw->sht 1548 * which is also only used for v1/v2 hw to skip it for v3 hw 1549 */ 1550 if (hisi_hba->hw->sht) 1551 timer_delete_sync(&hisi_hba->timer); 1552 1553 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1554 } 1555 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1556 1557 static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie) 1558 { 1559 struct hisi_sas_phy *phy = data; 1560 struct hisi_hba *hisi_hba = phy->hisi_hba; 1561 struct device *dev = hisi_hba->dev; 1562 DECLARE_COMPLETION_ONSTACK(completion); 1563 int phy_no = phy->sas_phy.id; 1564 1565 phy->reset_completion = &completion; 1566 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1567 if (!wait_for_completion_timeout(&completion, 1568 HISI_SAS_WAIT_PHYUP_TIMEOUT)) 1569 dev_warn(dev, "phy%d wait phyup timed out\n", phy_no); 1570 1571 phy->reset_completion = NULL; 1572 } 1573 1574 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1575 { 1576 struct Scsi_Host *shost = hisi_hba->shost; 1577 ASYNC_DOMAIN_EXCLUSIVE(async); 1578 int phy_no; 1579 1580 /* Init and wait for PHYs to come up and all libsas event finished. */ 1581 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1582 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1583 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1584 1585 if (!sas_phy->phy->enabled) 1586 continue; 1587 1588 if (!(hisi_hba->phy_state & BIT(phy_no))) { 1589 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1590 continue; 1591 } 1592 1593 async_schedule_domain(hisi_sas_async_init_wait_phyup, 1594 phy, &async); 1595 } 1596 1597 async_synchronize_full_domain(&async); 1598 hisi_sas_refresh_port_id(hisi_hba); 1599 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1600 1601 if (hisi_hba->reject_stp_links_msk) 1602 hisi_sas_terminate_stp_reject(hisi_hba); 1603 hisi_sas_reset_init_all_devices(hisi_hba); 1604 scsi_unblock_requests(shost); 1605 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1606 up(&hisi_hba->sem); 1607 1608 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1609 } 1610 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1611 1612 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1613 { 1614 if (!hisi_hba->hw->soft_reset) 1615 return -ENOENT; 1616 1617 down(&hisi_hba->sem); 1618 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1619 up(&hisi_hba->sem); 1620 return -EPERM; 1621 } 1622 1623 if (hisi_sas_debugfs_enable) 1624 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1625 1626 return 0; 1627 } 1628 1629 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1630 { 1631 struct device *dev = hisi_hba->dev; 1632 struct Scsi_Host *shost = hisi_hba->shost; 1633 int rc; 1634 1635 dev_info(dev, "controller resetting...\n"); 1636 hisi_sas_controller_reset_prepare(hisi_hba); 1637 1638 rc = hisi_hba->hw->soft_reset(hisi_hba); 1639 if (rc) { 1640 dev_warn(dev, "controller reset failed (%d)\n", rc); 1641 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1642 up(&hisi_hba->sem); 1643 scsi_unblock_requests(shost); 1644 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1645 return rc; 1646 } 1647 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1648 1649 hisi_sas_controller_reset_done(hisi_hba); 1650 dev_info(dev, "controller reset complete\n"); 1651 1652 return 0; 1653 } 1654 1655 static int hisi_sas_abort_task(struct sas_task *task) 1656 { 1657 struct hisi_sas_internal_abort_data internal_abort_data = { false }; 1658 struct domain_device *device = task->dev; 1659 struct hisi_sas_device *sas_dev = device->lldd_dev; 1660 struct hisi_sas_slot *slot = task->lldd_task; 1661 struct hisi_hba *hisi_hba; 1662 struct device *dev; 1663 int rc = TMF_RESP_FUNC_FAILED; 1664 unsigned long flags; 1665 1666 if (!sas_dev) 1667 return TMF_RESP_FUNC_FAILED; 1668 1669 hisi_hba = dev_to_hisi_hba(task->dev); 1670 dev = hisi_hba->dev; 1671 1672 spin_lock_irqsave(&task->task_state_lock, flags); 1673 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1674 struct hisi_sas_cq *cq; 1675 1676 if (slot) { 1677 /* 1678 * sync irq or poll queue to avoid free'ing task 1679 * before using task in IO completion 1680 */ 1681 cq = &hisi_hba->cq[slot->dlvry_queue]; 1682 hisi_sas_sync_cq(cq); 1683 } 1684 spin_unlock_irqrestore(&task->task_state_lock, flags); 1685 rc = TMF_RESP_FUNC_COMPLETE; 1686 goto out; 1687 } 1688 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1689 spin_unlock_irqrestore(&task->task_state_lock, flags); 1690 1691 if (!slot) 1692 goto out; 1693 1694 if (task->task_proto & SAS_PROTOCOL_SSP) { 1695 u16 tag = slot->idx; 1696 int rc2; 1697 1698 rc = sas_abort_task(task, tag); 1699 rc2 = sas_execute_internal_abort_single(device, tag, 1700 slot->dlvry_queue, &internal_abort_data); 1701 if (rc2 < 0) { 1702 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1703 return TMF_RESP_FUNC_FAILED; 1704 } 1705 1706 /* 1707 * If the TMF finds that the IO is not in the device and also 1708 * the internal abort does not succeed, then it is safe to 1709 * free the slot. 1710 * Note: if the internal abort succeeds then the slot 1711 * will have already been completed 1712 */ 1713 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1714 if (task->lldd_task) 1715 hisi_sas_do_release_task(hisi_hba, task, slot, true); 1716 } 1717 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1718 task->task_proto & SAS_PROTOCOL_STP) { 1719 if (task->dev->dev_type == SAS_SATA_DEV) { 1720 struct ata_queued_cmd *qc = task->uldd_task; 1721 1722 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1723 if (rc < 0) { 1724 dev_err(dev, "abort task: internal abort failed\n"); 1725 goto out; 1726 } 1727 hisi_sas_dereg_device(hisi_hba, device); 1728 1729 /* 1730 * If an ATA internal command times out in ATA EH, it 1731 * need to execute soft reset, so check the scsicmd 1732 */ 1733 if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && 1734 qc && qc->scsicmd) { 1735 hisi_sas_do_release_task(hisi_hba, task, slot, true); 1736 rc = TMF_RESP_FUNC_COMPLETE; 1737 } else { 1738 rc = hisi_sas_softreset_ata_disk(device); 1739 } 1740 } 1741 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 1742 /* SMP */ 1743 u32 tag = slot->idx; 1744 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1745 1746 rc = sas_execute_internal_abort_single(device, 1747 tag, slot->dlvry_queue, 1748 &internal_abort_data); 1749 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1750 task->lldd_task) { 1751 /* 1752 * sync irq or poll queue to avoid free'ing task 1753 * before using task in IO completion 1754 */ 1755 hisi_sas_sync_cq(cq); 1756 slot->task = NULL; 1757 } 1758 } 1759 1760 out: 1761 if (rc != TMF_RESP_FUNC_COMPLETE) 1762 dev_notice(dev, "abort task: rc=%d\n", rc); 1763 return rc; 1764 } 1765 1766 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1767 { 1768 struct hisi_sas_device *sas_dev = device->lldd_dev; 1769 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1770 struct device *dev = hisi_hba->dev; 1771 int rc; 1772 1773 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1774 if (rc < 0) { 1775 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1776 return TMF_RESP_FUNC_FAILED; 1777 } 1778 hisi_sas_dereg_device(hisi_hba, device); 1779 1780 rc = sas_abort_task_set(device, lun); 1781 if (rc == TMF_RESP_FUNC_COMPLETE) 1782 hisi_sas_release_task(hisi_hba, device); 1783 1784 return rc; 1785 } 1786 1787 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1788 { 1789 struct sas_phy *local_phy = sas_get_local_phy(device); 1790 struct hisi_sas_device *sas_dev = device->lldd_dev; 1791 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1792 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1793 int rc, reset_type; 1794 1795 if (!local_phy->enabled) { 1796 sas_put_local_phy(local_phy); 1797 return -ENODEV; 1798 } 1799 1800 if (scsi_is_sas_phy_local(local_phy)) { 1801 struct asd_sas_phy *sas_phy = 1802 sas_ha->sas_phy[local_phy->number]; 1803 struct hisi_sas_phy *phy = 1804 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1805 unsigned long flags; 1806 1807 spin_lock_irqsave(&phy->lock, flags); 1808 phy->in_reset = 1; 1809 spin_unlock_irqrestore(&phy->lock, flags); 1810 } 1811 1812 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1813 !dev_is_sata(device)) ? true : false; 1814 1815 rc = sas_phy_reset(local_phy, reset_type); 1816 sas_put_local_phy(local_phy); 1817 1818 if (scsi_is_sas_phy_local(local_phy)) { 1819 struct asd_sas_phy *sas_phy = 1820 sas_ha->sas_phy[local_phy->number]; 1821 struct hisi_sas_phy *phy = 1822 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1823 unsigned long flags; 1824 1825 spin_lock_irqsave(&phy->lock, flags); 1826 phy->in_reset = 0; 1827 spin_unlock_irqrestore(&phy->lock, flags); 1828 1829 /* report PHY down if timed out */ 1830 if (rc == -ETIMEDOUT) 1831 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1832 return rc; 1833 } 1834 1835 /* Remote phy */ 1836 if (rc) 1837 return rc; 1838 1839 if (dev_is_sata(device)) { 1840 struct ata_link *link = &device->sata_dev.ap->link; 1841 1842 rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT, 1843 smp_ata_check_ready_type); 1844 } else { 1845 msleep(2000); 1846 } 1847 1848 return rc; 1849 } 1850 1851 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1852 { 1853 struct hisi_sas_device *sas_dev = device->lldd_dev; 1854 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1855 struct device *dev = hisi_hba->dev; 1856 int rc; 1857 1858 if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) 1859 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 1860 1861 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1862 if (rc < 0) { 1863 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1864 return TMF_RESP_FUNC_FAILED; 1865 } 1866 hisi_sas_dereg_device(hisi_hba, device); 1867 1868 rc = hisi_sas_debug_I_T_nexus_reset(device); 1869 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1870 struct sas_phy *local_phy; 1871 1872 rc = hisi_sas_softreset_ata_disk(device); 1873 switch (rc) { 1874 case -ECOMM: 1875 rc = -ENODEV; 1876 break; 1877 case TMF_RESP_FUNC_FAILED: 1878 case -EMSGSIZE: 1879 case -EIO: 1880 local_phy = sas_get_local_phy(device); 1881 rc = sas_phy_enable(local_phy, 0); 1882 if (!rc) { 1883 local_phy->enabled = 0; 1884 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1885 SAS_ADDR(device->sas_addr), rc); 1886 rc = -ENODEV; 1887 } 1888 sas_put_local_phy(local_phy); 1889 break; 1890 default: 1891 break; 1892 } 1893 } 1894 1895 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1896 hisi_sas_release_task(hisi_hba, device); 1897 1898 return rc; 1899 } 1900 1901 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1902 { 1903 struct hisi_sas_device *sas_dev = device->lldd_dev; 1904 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1905 struct device *dev = hisi_hba->dev; 1906 int rc = TMF_RESP_FUNC_FAILED; 1907 1908 /* Clear internal IO and then lu reset */ 1909 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1910 if (rc < 0) { 1911 dev_err(dev, "lu_reset: internal abort failed\n"); 1912 goto out; 1913 } 1914 hisi_sas_dereg_device(hisi_hba, device); 1915 1916 if (dev_is_sata(device)) { 1917 struct sas_phy *phy; 1918 1919 phy = sas_get_local_phy(device); 1920 1921 rc = sas_phy_reset(phy, true); 1922 1923 if (rc == 0) 1924 hisi_sas_release_task(hisi_hba, device); 1925 sas_put_local_phy(phy); 1926 } else { 1927 rc = sas_lu_reset(device, lun); 1928 if (rc == TMF_RESP_FUNC_COMPLETE) 1929 hisi_sas_release_task(hisi_hba, device); 1930 } 1931 out: 1932 if (rc != TMF_RESP_FUNC_COMPLETE) 1933 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1934 sas_dev->device_id, rc); 1935 return rc; 1936 } 1937 1938 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1939 { 1940 struct domain_device *device = data; 1941 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1942 int rc; 1943 1944 rc = hisi_sas_debug_I_T_nexus_reset(device); 1945 if (rc != TMF_RESP_FUNC_COMPLETE) 1946 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1947 SAS_ADDR(device->sas_addr), rc); 1948 } 1949 1950 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1951 { 1952 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1953 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1954 ASYNC_DOMAIN_EXCLUSIVE(async); 1955 int i; 1956 1957 queue_work(hisi_hba->wq, &r.work); 1958 wait_for_completion(r.completion); 1959 if (!r.done) 1960 return TMF_RESP_FUNC_FAILED; 1961 1962 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1963 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1964 struct domain_device *device = sas_dev->sas_device; 1965 1966 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1967 dev_is_expander(device->dev_type)) 1968 continue; 1969 1970 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1971 device, &async); 1972 } 1973 1974 async_synchronize_full_domain(&async); 1975 hisi_sas_release_tasks(hisi_hba); 1976 1977 return TMF_RESP_FUNC_COMPLETE; 1978 } 1979 1980 static int hisi_sas_query_task(struct sas_task *task) 1981 { 1982 int rc = TMF_RESP_FUNC_FAILED; 1983 1984 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1985 struct hisi_sas_slot *slot = task->lldd_task; 1986 u32 tag = slot->idx; 1987 1988 rc = sas_query_task(task, tag); 1989 switch (rc) { 1990 /* The task is still in Lun, release it then */ 1991 case TMF_RESP_FUNC_SUCC: 1992 /* The task is not in Lun or failed, reset the phy */ 1993 case TMF_RESP_FUNC_FAILED: 1994 case TMF_RESP_FUNC_COMPLETE: 1995 break; 1996 default: 1997 rc = TMF_RESP_FUNC_FAILED; 1998 break; 1999 } 2000 } 2001 return rc; 2002 } 2003 2004 static bool hisi_sas_internal_abort_timeout(struct sas_task *task, 2005 void *data) 2006 { 2007 struct domain_device *device = task->dev; 2008 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 2009 struct hisi_sas_internal_abort_data *timeout = data; 2010 2011 if (hisi_sas_debugfs_enable) { 2012 /* 2013 * If timeout occurs in device gone scenario, to avoid 2014 * circular dependency like: 2015 * hisi_sas_dev_gone() -> down() -> ... -> 2016 * hisi_sas_internal_abort_timeout() -> down(). 2017 */ 2018 if (!timeout->rst_ha_timeout) 2019 down(&hisi_hba->sem); 2020 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 2021 if (!timeout->rst_ha_timeout) 2022 up(&hisi_hba->sem); 2023 } 2024 2025 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 2026 pr_err("Internal abort: timeout %016llx\n", 2027 SAS_ADDR(device->sas_addr)); 2028 } else { 2029 struct hisi_sas_slot *slot = task->lldd_task; 2030 2031 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 2032 2033 if (slot) { 2034 struct hisi_sas_cq *cq = 2035 &hisi_hba->cq[slot->dlvry_queue]; 2036 /* 2037 * sync irq or poll queue to avoid free'ing task 2038 * before using task in IO completion 2039 */ 2040 hisi_sas_sync_cq(cq); 2041 slot->task = NULL; 2042 } 2043 2044 if (timeout->rst_ha_timeout) { 2045 pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n", 2046 SAS_ADDR(device->sas_addr)); 2047 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2048 } else { 2049 pr_err("Internal abort: timeout and not done %016llx.\n", 2050 SAS_ADDR(device->sas_addr)); 2051 } 2052 2053 return true; 2054 } 2055 2056 return false; 2057 } 2058 2059 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 2060 { 2061 hisi_sas_port_notify_formed(sas_phy); 2062 } 2063 2064 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 2065 u8 reg_index, u8 reg_count, u8 *write_data) 2066 { 2067 struct hisi_hba *hisi_hba = sha->lldd_ha; 2068 2069 if (!hisi_hba->hw->write_gpio) 2070 return -EOPNOTSUPP; 2071 2072 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 2073 reg_index, reg_count, write_data); 2074 } 2075 2076 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 2077 { 2078 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2079 struct sas_phy *sphy = sas_phy->phy; 2080 unsigned long flags; 2081 2082 phy->phy_attached = 0; 2083 phy->phy_type = 0; 2084 phy->port = NULL; 2085 2086 spin_lock_irqsave(&phy->lock, flags); 2087 if (phy->enable) 2088 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 2089 else 2090 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 2091 spin_unlock_irqrestore(&phy->lock, flags); 2092 } 2093 2094 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 2095 gfp_t gfp_flags) 2096 { 2097 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2098 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2099 struct device *dev = hisi_hba->dev; 2100 2101 if (rdy) { 2102 /* Phy down but ready */ 2103 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 2104 hisi_sas_port_notify_formed(sas_phy); 2105 } else { 2106 struct hisi_sas_port *port = phy->port; 2107 2108 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 2109 phy->in_reset) { 2110 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 2111 return; 2112 } 2113 /* Phy down and not ready */ 2114 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 2115 sas_phy_disconnected(sas_phy); 2116 2117 if (port) { 2118 if (phy->phy_type & PORT_TYPE_SAS) { 2119 int port_id = port->id; 2120 2121 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 2122 port_id)) 2123 port->port_attached = 0; 2124 } else if (phy->phy_type & PORT_TYPE_SATA) 2125 port->port_attached = 0; 2126 } 2127 hisi_sas_phy_disconnected(phy); 2128 } 2129 } 2130 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 2131 2132 void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) 2133 { 2134 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2135 struct hisi_hba *hisi_hba = phy->hisi_hba; 2136 2137 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 2138 return; 2139 2140 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); 2141 } 2142 EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); 2143 2144 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2145 { 2146 struct hisi_hba *hisi_hba = shost_priv(shost); 2147 2148 if (reset_type != SCSI_ADAPTER_RESET) 2149 return -EOPNOTSUPP; 2150 2151 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2152 2153 return 0; 2154 } 2155 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2156 2157 struct scsi_transport_template *hisi_sas_stt; 2158 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2159 2160 static struct sas_domain_function_template hisi_sas_transport_ops = { 2161 .lldd_dev_found = hisi_sas_dev_found, 2162 .lldd_dev_gone = hisi_sas_dev_gone, 2163 .lldd_execute_task = hisi_sas_queue_command, 2164 .lldd_control_phy = hisi_sas_control_phy, 2165 .lldd_abort_task = hisi_sas_abort_task, 2166 .lldd_abort_task_set = hisi_sas_abort_task_set, 2167 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2168 .lldd_lu_reset = hisi_sas_lu_reset, 2169 .lldd_query_task = hisi_sas_query_task, 2170 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2171 .lldd_port_formed = hisi_sas_port_formed, 2172 .lldd_write_gpio = hisi_sas_write_gpio, 2173 .lldd_tmf_aborted = hisi_sas_tmf_aborted, 2174 .lldd_abort_timeout = hisi_sas_internal_abort_timeout, 2175 }; 2176 2177 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2178 { 2179 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2180 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2181 2182 for (i = 0; i < hisi_hba->queue_count; i++) { 2183 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2184 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2185 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2186 2187 s = sizeof(struct hisi_sas_cmd_hdr); 2188 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2189 memset(&cmd_hdr[j], 0, s); 2190 2191 dq->wr_point = 0; 2192 2193 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2194 memset(hisi_hba->complete_hdr[i], 0, s); 2195 cq->rd_point = 0; 2196 } 2197 2198 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2199 memset(hisi_hba->initial_fis, 0, s); 2200 2201 s = max_command_entries * sizeof(struct hisi_sas_iost); 2202 memset(hisi_hba->iost, 0, s); 2203 2204 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2205 memset(hisi_hba->breakpoint, 0, s); 2206 2207 s = sizeof(struct hisi_sas_sata_breakpoint); 2208 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2209 memset(&sata_breakpoint[j], 0, s); 2210 } 2211 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2212 2213 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2214 { 2215 struct device *dev = hisi_hba->dev; 2216 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2217 int max_command_entries_ru, sz_slot_buf_ru; 2218 int blk_cnt, slots_per_blk; 2219 2220 sema_init(&hisi_hba->sem, 1); 2221 spin_lock_init(&hisi_hba->lock); 2222 for (i = 0; i < hisi_hba->n_phy; i++) { 2223 hisi_sas_phy_init(hisi_hba, i); 2224 hisi_hba->port[i].port_attached = 0; 2225 hisi_hba->port[i].id = -1; 2226 } 2227 2228 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2229 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2230 hisi_hba->devices[i].device_id = i; 2231 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2232 } 2233 2234 for (i = 0; i < hisi_hba->queue_count; i++) { 2235 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2236 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2237 2238 /* Completion queue structure */ 2239 cq->id = i; 2240 cq->hisi_hba = hisi_hba; 2241 spin_lock_init(&cq->poll_lock); 2242 2243 /* Delivery queue structure */ 2244 spin_lock_init(&dq->lock); 2245 INIT_LIST_HEAD(&dq->list); 2246 dq->id = i; 2247 dq->hisi_hba = hisi_hba; 2248 2249 /* Delivery queue */ 2250 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2251 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2252 &hisi_hba->cmd_hdr_dma[i], 2253 GFP_KERNEL); 2254 if (!hisi_hba->cmd_hdr[i]) 2255 goto err_out; 2256 2257 /* Completion queue */ 2258 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2259 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2260 &hisi_hba->complete_hdr_dma[i], 2261 GFP_KERNEL); 2262 if (!hisi_hba->complete_hdr[i]) 2263 goto err_out; 2264 } 2265 2266 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2267 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2268 GFP_KERNEL); 2269 if (!hisi_hba->itct) 2270 goto err_out; 2271 2272 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2273 sizeof(struct hisi_sas_slot), 2274 GFP_KERNEL); 2275 if (!hisi_hba->slot_info) 2276 goto err_out; 2277 2278 /* roundup to avoid overly large block size */ 2279 max_command_entries_ru = roundup(max_command_entries, 64); 2280 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2281 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2282 else 2283 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2284 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2285 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2286 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2287 slots_per_blk = s / sz_slot_buf_ru; 2288 2289 for (i = 0; i < blk_cnt; i++) { 2290 int slot_index = i * slots_per_blk; 2291 dma_addr_t buf_dma; 2292 void *buf; 2293 2294 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2295 GFP_KERNEL); 2296 if (!buf) 2297 goto err_out; 2298 2299 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2300 struct hisi_sas_slot *slot; 2301 2302 slot = &hisi_hba->slot_info[slot_index]; 2303 slot->buf = buf; 2304 slot->buf_dma = buf_dma; 2305 slot->idx = slot_index; 2306 2307 buf += sz_slot_buf_ru; 2308 buf_dma += sz_slot_buf_ru; 2309 } 2310 } 2311 2312 s = max_command_entries * sizeof(struct hisi_sas_iost); 2313 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2314 GFP_KERNEL); 2315 if (!hisi_hba->iost) 2316 goto err_out; 2317 2318 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2319 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2320 &hisi_hba->breakpoint_dma, 2321 GFP_KERNEL); 2322 if (!hisi_hba->breakpoint) 2323 goto err_out; 2324 2325 s = hisi_hba->slot_index_count = max_command_entries; 2326 hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); 2327 if (!hisi_hba->slot_index_tags) 2328 goto err_out; 2329 2330 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2331 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2332 &hisi_hba->initial_fis_dma, 2333 GFP_KERNEL); 2334 if (!hisi_hba->initial_fis) 2335 goto err_out; 2336 2337 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2338 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2339 &hisi_hba->sata_breakpoint_dma, 2340 GFP_KERNEL); 2341 if (!hisi_hba->sata_breakpoint) 2342 goto err_out; 2343 2344 hisi_hba->last_slot_index = 0; 2345 2346 hisi_hba->wq = 2347 alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev)); 2348 if (!hisi_hba->wq) { 2349 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2350 goto err_out; 2351 } 2352 2353 return 0; 2354 err_out: 2355 return -ENOMEM; 2356 } 2357 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2358 2359 void hisi_sas_free(struct hisi_hba *hisi_hba) 2360 { 2361 int i; 2362 2363 for (i = 0; i < hisi_hba->n_phy; i++) { 2364 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2365 2366 timer_delete_sync(&phy->timer); 2367 } 2368 2369 if (hisi_hba->wq) 2370 destroy_workqueue(hisi_hba->wq); 2371 } 2372 EXPORT_SYMBOL_GPL(hisi_sas_free); 2373 2374 void hisi_sas_rst_work_handler(struct work_struct *work) 2375 { 2376 struct hisi_hba *hisi_hba = 2377 container_of(work, struct hisi_hba, rst_work); 2378 2379 if (hisi_sas_controller_prereset(hisi_hba)) 2380 return; 2381 2382 hisi_sas_controller_reset(hisi_hba); 2383 } 2384 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2385 2386 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2387 { 2388 struct hisi_sas_rst *rst = 2389 container_of(work, struct hisi_sas_rst, work); 2390 2391 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2392 goto rst_complete; 2393 2394 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2395 rst->done = true; 2396 rst_complete: 2397 complete(rst->completion); 2398 } 2399 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2400 2401 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2402 { 2403 struct device *dev = hisi_hba->dev; 2404 struct platform_device *pdev = hisi_hba->platform_dev; 2405 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2406 struct clk *refclk; 2407 2408 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2409 SAS_ADDR_SIZE)) { 2410 dev_err(dev, "could not get property sas-addr\n"); 2411 return -ENOENT; 2412 } 2413 2414 if (np) { 2415 /* 2416 * These properties are only required for platform device-based 2417 * controller with DT firmware. 2418 */ 2419 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2420 "hisilicon,sas-syscon"); 2421 if (IS_ERR(hisi_hba->ctrl)) { 2422 dev_err(dev, "could not get syscon\n"); 2423 return -ENOENT; 2424 } 2425 2426 if (device_property_read_u32(dev, "ctrl-reset-reg", 2427 &hisi_hba->ctrl_reset_reg)) { 2428 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2429 return -ENOENT; 2430 } 2431 2432 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2433 &hisi_hba->ctrl_reset_sts_reg)) { 2434 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2435 return -ENOENT; 2436 } 2437 2438 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2439 &hisi_hba->ctrl_clock_ena_reg)) { 2440 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2441 return -ENOENT; 2442 } 2443 } 2444 2445 refclk = devm_clk_get(dev, NULL); 2446 if (IS_ERR(refclk)) 2447 dev_dbg(dev, "no ref clk property\n"); 2448 else 2449 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2450 2451 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2452 dev_err(dev, "could not get property phy-count\n"); 2453 return -ENOENT; 2454 } 2455 2456 if (device_property_read_u32(dev, "queue-count", 2457 &hisi_hba->queue_count)) { 2458 dev_err(dev, "could not get property queue-count\n"); 2459 return -ENOENT; 2460 } 2461 2462 return 0; 2463 } 2464 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2465 2466 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2467 const struct hisi_sas_hw *hw) 2468 { 2469 struct resource *res; 2470 struct Scsi_Host *shost; 2471 struct hisi_hba *hisi_hba; 2472 struct device *dev = &pdev->dev; 2473 int error; 2474 2475 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2476 if (!shost) { 2477 dev_err(dev, "scsi host alloc failed\n"); 2478 return NULL; 2479 } 2480 hisi_hba = shost_priv(shost); 2481 2482 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2483 hisi_hba->hw = hw; 2484 hisi_hba->dev = dev; 2485 hisi_hba->platform_dev = pdev; 2486 hisi_hba->shost = shost; 2487 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2488 2489 timer_setup(&hisi_hba->timer, NULL, 0); 2490 2491 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2492 goto err_out; 2493 2494 if (hisi_hba->hw->fw_info_check) { 2495 if (hisi_hba->hw->fw_info_check(hisi_hba)) 2496 goto err_out; 2497 } 2498 2499 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2500 if (error) { 2501 dev_err(dev, "No usable DMA addressing method\n"); 2502 goto err_out; 2503 } 2504 2505 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2506 if (IS_ERR(hisi_hba->regs)) 2507 goto err_out; 2508 2509 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2510 if (res) { 2511 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2512 if (IS_ERR(hisi_hba->sgpio_regs)) 2513 goto err_out; 2514 } 2515 2516 if (hisi_sas_alloc(hisi_hba)) { 2517 hisi_sas_free(hisi_hba); 2518 goto err_out; 2519 } 2520 2521 return shost; 2522 err_out: 2523 scsi_host_put(shost); 2524 dev_err(dev, "shost alloc failed\n"); 2525 return NULL; 2526 } 2527 2528 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2529 { 2530 if (hisi_hba->hw->interrupt_preinit) 2531 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2532 return 0; 2533 } 2534 2535 int hisi_sas_probe(struct platform_device *pdev, 2536 const struct hisi_sas_hw *hw) 2537 { 2538 struct Scsi_Host *shost; 2539 struct hisi_hba *hisi_hba; 2540 struct device *dev = &pdev->dev; 2541 struct asd_sas_phy **arr_phy; 2542 struct asd_sas_port **arr_port; 2543 struct sas_ha_struct *sha; 2544 int rc, phy_nr, port_nr, i; 2545 2546 shost = hisi_sas_shost_alloc(pdev, hw); 2547 if (!shost) 2548 return -ENOMEM; 2549 2550 sha = SHOST_TO_SAS_HA(shost); 2551 hisi_hba = shost_priv(shost); 2552 platform_set_drvdata(pdev, sha); 2553 2554 phy_nr = port_nr = hisi_hba->n_phy; 2555 2556 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2557 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2558 if (!arr_phy || !arr_port) { 2559 rc = -ENOMEM; 2560 goto err_out_ha; 2561 } 2562 2563 sha->sas_phy = arr_phy; 2564 sha->sas_port = arr_port; 2565 sha->lldd_ha = hisi_hba; 2566 2567 shost->transportt = hisi_sas_stt; 2568 shost->max_id = HISI_SAS_MAX_DEVICES; 2569 shost->max_lun = ~0; 2570 shost->max_channel = 1; 2571 shost->max_cmd_len = 16; 2572 if (hisi_hba->hw->slot_index_alloc) { 2573 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2574 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2575 } else { 2576 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2577 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2578 } 2579 2580 sha->sas_ha_name = DRV_NAME; 2581 sha->dev = hisi_hba->dev; 2582 sha->sas_addr = &hisi_hba->sas_addr[0]; 2583 sha->num_phys = hisi_hba->n_phy; 2584 sha->shost = hisi_hba->shost; 2585 2586 for (i = 0; i < hisi_hba->n_phy; i++) { 2587 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2588 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2589 } 2590 2591 rc = hisi_sas_interrupt_preinit(hisi_hba); 2592 if (rc) 2593 goto err_out_ha; 2594 2595 rc = scsi_add_host(shost, &pdev->dev); 2596 if (rc) 2597 goto err_out_ha; 2598 2599 rc = sas_register_ha(sha); 2600 if (rc) 2601 goto err_out_register_ha; 2602 2603 rc = hisi_hba->hw->hw_init(hisi_hba); 2604 if (rc) 2605 goto err_out_hw_init; 2606 2607 scsi_scan_host(shost); 2608 2609 return 0; 2610 2611 err_out_hw_init: 2612 sas_unregister_ha(sha); 2613 err_out_register_ha: 2614 scsi_remove_host(shost); 2615 err_out_ha: 2616 hisi_sas_free(hisi_hba); 2617 scsi_host_put(shost); 2618 return rc; 2619 } 2620 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2621 2622 void hisi_sas_remove(struct platform_device *pdev) 2623 { 2624 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2625 struct hisi_hba *hisi_hba = sha->lldd_ha; 2626 struct Scsi_Host *shost = sha->shost; 2627 2628 timer_delete_sync(&hisi_hba->timer); 2629 2630 sas_unregister_ha(sha); 2631 sas_remove_host(shost); 2632 2633 hisi_sas_free(hisi_hba); 2634 scsi_host_put(shost); 2635 } 2636 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2637 2638 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2639 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2640 bool hisi_sas_debugfs_enable = true; 2641 u32 hisi_sas_debugfs_dump_count = 50; 2642 #else 2643 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2644 bool hisi_sas_debugfs_enable; 2645 u32 hisi_sas_debugfs_dump_count = 1; 2646 #endif 2647 2648 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2649 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2650 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2651 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2652 2653 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2654 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2655 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2656 2657 struct dentry *hisi_sas_debugfs_dir; 2658 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2659 2660 static __init int hisi_sas_init(void) 2661 { 2662 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2663 if (!hisi_sas_stt) 2664 return -ENOMEM; 2665 2666 if (hisi_sas_debugfs_enable) { 2667 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2668 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2669 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2670 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2671 } 2672 } 2673 2674 return 0; 2675 } 2676 2677 static __exit void hisi_sas_exit(void) 2678 { 2679 if (hisi_sas_debugfs_enable) 2680 debugfs_remove(hisi_sas_debugfs_dir); 2681 2682 sas_release_transport(hisi_sas_stt); 2683 } 2684 2685 module_init(hisi_sas_init); 2686 module_exit(hisi_sas_exit); 2687 2688 MODULE_LICENSE("GPL"); 2689 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2690 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2691 MODULE_ALIAS("platform:" DRV_NAME); 2692