drivers/nvme/host/fc.c: lines matching "supports", "-", "cqe" (non-contiguous excerpt; file line numbers retained)

1 // SPDX-License-Identifier: GPL-2.0
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
65 struct list_head lsreq_list; /* rport->ls_req_list */
77 struct list_head lsrcv_list; /* rport->ls_rcv_list */
133 struct list_head endp_list; /* for lport->endp_list */
147 /* fc_ctrl flags values - specified as bit positions */
164 struct list_head ctrl_list; /* rport->ctrl_list */
228 * These items are short-term. They will eventually be moved into
235 /* *********************** FC-NVME Port Management ************************ */
250 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); in nvme_fc_free_lport()
251 WARN_ON(!list_empty(&lport->endp_list)); in nvme_fc_free_lport()
255 list_del(&lport->port_list); in nvme_fc_free_lport()
260 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); in nvme_fc_free_lport()
261 ida_destroy(&lport->endp_cnt); in nvme_fc_free_lport()
263 put_device(lport->dev); in nvme_fc_free_lport()
271 kref_put(&lport->ref, nvme_fc_free_lport); in nvme_fc_lport_put()
277 return kref_get_unless_zero(&lport->ref); in nvme_fc_lport_get()
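
The lport lifetime handling above is the stock kref idiom: the release function runs only when the final reference drops, and lookups take references through kref_get_unless_zero() so an object whose count already reached zero cannot be revived. A minimal, generic sketch of the same pattern (struct and names invented for illustration):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
	/* payload */
};

static void foo_free(struct kref *ref)
{
	struct foo *f = container_of(ref, struct foo, ref);

	kfree(f);			/* release callback: last put only */
}

static void foo_put(struct foo *f)
{
	kref_put(&f->ref, foo_free);
}

static int foo_get(struct foo *f)
{
	return kref_get_unless_zero(&f->ref);	/* 0 if already dying */
}
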
292 if (lport->localport.node_name != pinfo->node_name || in nvme_fc_attach_to_unreg_lport()
293 lport->localport.port_name != pinfo->port_name) in nvme_fc_attach_to_unreg_lport()
296 if (lport->dev != dev) { in nvme_fc_attach_to_unreg_lport()
297 lport = ERR_PTR(-EXDEV); in nvme_fc_attach_to_unreg_lport()
301 if (lport->localport.port_state != FC_OBJSTATE_DELETED) { in nvme_fc_attach_to_unreg_lport()
302 lport = ERR_PTR(-EEXIST); in nvme_fc_attach_to_unreg_lport()
317 lport->ops = ops; in nvme_fc_attach_to_unreg_lport()
318 lport->localport.port_role = pinfo->port_role; in nvme_fc_attach_to_unreg_lport()
319 lport->localport.port_id = pinfo->port_id; in nvme_fc_attach_to_unreg_lport()
320 lport->localport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_attach_to_unreg_lport()
336 * nvme_fc_register_localport - transport entry point called by an
350 * (ex: -ENXIO) upon failure.
362 if (!template->localport_delete || !template->remoteport_delete || in nvme_fc_register_localport()
363 !template->ls_req || !template->fcp_io || in nvme_fc_register_localport()
364 !template->ls_abort || !template->fcp_abort || in nvme_fc_register_localport()
365 !template->max_hw_queues || !template->max_sgl_segments || in nvme_fc_register_localport()
366 !template->max_dif_sgl_segments || !template->dma_boundary) { in nvme_fc_register_localport()
367 ret = -EINVAL; in nvme_fc_register_localport()
375 * expired, we can simply re-enable the localport. Remoteports in nvme_fc_register_localport()
387 *portptr = &newrec->localport; in nvme_fc_register_localport()
391 /* nothing found - allocate a new localport struct */ in nvme_fc_register_localport()
393 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), in nvme_fc_register_localport()
396 ret = -ENOMEM; in nvme_fc_register_localport()
402 ret = -ENOSPC; in nvme_fc_register_localport()
407 ret = -ENODEV; in nvme_fc_register_localport()
411 INIT_LIST_HEAD(&newrec->port_list); in nvme_fc_register_localport()
412 INIT_LIST_HEAD(&newrec->endp_list); in nvme_fc_register_localport()
413 kref_init(&newrec->ref); in nvme_fc_register_localport()
414 atomic_set(&newrec->act_rport_cnt, 0); in nvme_fc_register_localport()
415 newrec->ops = template; in nvme_fc_register_localport()
416 newrec->dev = dev; in nvme_fc_register_localport()
417 ida_init(&newrec->endp_cnt); in nvme_fc_register_localport()
418 if (template->local_priv_sz) in nvme_fc_register_localport()
419 newrec->localport.private = &newrec[1]; in nvme_fc_register_localport()
421 newrec->localport.private = NULL; in nvme_fc_register_localport()
422 newrec->localport.node_name = pinfo->node_name; in nvme_fc_register_localport()
423 newrec->localport.port_name = pinfo->port_name; in nvme_fc_register_localport()
424 newrec->localport.port_role = pinfo->port_role; in nvme_fc_register_localport()
425 newrec->localport.port_id = pinfo->port_id; in nvme_fc_register_localport()
426 newrec->localport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_register_localport()
427 newrec->localport.port_num = idx; in nvme_fc_register_localport()
430 list_add_tail(&newrec->port_list, &nvme_fc_lport_list); in nvme_fc_register_localport()
434 dma_set_seg_boundary(dev, template->dma_boundary); in nvme_fc_register_localport()
436 *portptr = &newrec->localport; in nvme_fc_register_localport()
451 * nvme_fc_unregister_localport - transport entry point called by an
458 * (ex: -ENXIO) upon failure.
467 return -EINVAL; in nvme_fc_unregister_localport()
471 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_unregister_localport()
473 return -EINVAL; in nvme_fc_unregister_localport()
475 portptr->port_state = FC_OBJSTATE_DELETED; in nvme_fc_unregister_localport()
479 if (atomic_read(&lport->act_rport_cnt) == 0) in nvme_fc_unregister_localport()
480 lport->ops->localport_delete(&lport->localport); in nvme_fc_unregister_localport()
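
For context on how an LLDD reaches these entry points: it fills a struct nvme_fc_port_template with every callback the validation above insists on, then registers each local FC port it owns. A minimal sketch, assuming the lldd_* callbacks and the two priv structs are defined elsewhere in the driver:

#include <linux/nvme-fc-driver.h>

static struct nvme_fc_port_template lldd_fc_template = {
	.localport_delete	= lldd_localport_delete,
	.remoteport_delete	= lldd_remoteport_delete,
	.ls_req			= lldd_ls_req,
	.fcp_io			= lldd_fcp_io,
	.ls_abort		= lldd_ls_abort,
	.fcp_abort		= lldd_fcp_abort,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= sizeof(struct lldd_lport_priv),
	.remote_priv_sz		= sizeof(struct lldd_rport_priv),
};

static int lldd_attach_port(struct device *dev, u64 wwnn, u64 wwpn,
			    struct nvme_fc_local_port **lportp)
{
	struct nvme_fc_port_info pinfo = {
		.node_name = wwnn,
		.port_name = wwpn,
		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
		.port_id   = 0x010200,	/* illustrative 24-bit FC address */
	};

	return nvme_fc_register_localport(&pinfo, &lldd_fc_template,
					  dev, lportp);
}

Re-registering the same node_name/port_name while old controllers still hold references simply revives the DELETED localport (nvme_fc_attach_to_unreg_lport() above); teardown is the mirror-image nvme_fc_unregister_localport() call.
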
489 * TRADDR strings, per FC-NVME are fixed format:
490 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
493 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
506 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) in nvme_fc_signal_discovery_scan()
510 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", in nvme_fc_signal_discovery_scan()
511 lport->localport.node_name, lport->localport.port_name); in nvme_fc_signal_discovery_scan()
513 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", in nvme_fc_signal_discovery_scan()
514 rport->remoteport.node_name, rport->remoteport.port_name); in nvme_fc_signal_discovery_scan()
515 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); in nvme_fc_signal_discovery_scan()
524 localport_to_lport(rport->remoteport.localport); in nvme_fc_free_rport()
527 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); in nvme_fc_free_rport()
528 WARN_ON(!list_empty(&rport->ctrl_list)); in nvme_fc_free_rport()
532 list_del(&rport->endp_list); in nvme_fc_free_rport()
535 WARN_ON(!list_empty(&rport->disc_list)); in nvme_fc_free_rport()
536 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); in nvme_fc_free_rport()
546 kref_put(&rport->ref, nvme_fc_free_rport); in nvme_fc_rport_put()
552 return kref_get_unless_zero(&rport->ref); in nvme_fc_rport_get()
558 switch (ctrl->ctrl.state) { in nvme_fc_resume_controller()
565 dev_info(ctrl->ctrl.device, in nvme_fc_resume_controller()
566 "NVME-FC{%d}: connectivity re-established. " in nvme_fc_resume_controller()
567 "Attempting reconnect\n", ctrl->cnum); in nvme_fc_resume_controller()
569 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); in nvme_fc_resume_controller()
581 /* no action to take - let it delete */ in nvme_fc_resume_controller()
596 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_attach_to_suspended_rport()
597 if (rport->remoteport.node_name != pinfo->node_name || in nvme_fc_attach_to_suspended_rport()
598 rport->remoteport.port_name != pinfo->port_name) in nvme_fc_attach_to_suspended_rport()
602 rport = ERR_PTR(-ENOLCK); in nvme_fc_attach_to_suspended_rport()
608 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
611 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { in nvme_fc_attach_to_suspended_rport()
613 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
615 return ERR_PTR(-ESTALE); in nvme_fc_attach_to_suspended_rport()
618 rport->remoteport.port_role = pinfo->port_role; in nvme_fc_attach_to_suspended_rport()
619 rport->remoteport.port_id = pinfo->port_id; in nvme_fc_attach_to_suspended_rport()
620 rport->remoteport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_attach_to_suspended_rport()
621 rport->dev_loss_end = 0; in nvme_fc_attach_to_suspended_rport()
627 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) in nvme_fc_attach_to_suspended_rport()
630 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
647 if (pinfo->dev_loss_tmo) in __nvme_fc_set_dev_loss_tmo()
648 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; in __nvme_fc_set_dev_loss_tmo()
650 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; in __nvme_fc_set_dev_loss_tmo()
654 * nvme_fc_register_remoteport - transport entry point called by an
667 * (ex: -ENXIO) upon failure.
680 ret = -ESHUTDOWN; in nvme_fc_register_remoteport()
701 *portptr = &newrec->remoteport; in nvme_fc_register_remoteport()
705 /* nothing found - allocate a new remoteport struct */ in nvme_fc_register_remoteport()
707 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), in nvme_fc_register_remoteport()
710 ret = -ENOMEM; in nvme_fc_register_remoteport()
714 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); in nvme_fc_register_remoteport()
716 ret = -ENOSPC; in nvme_fc_register_remoteport()
720 INIT_LIST_HEAD(&newrec->endp_list); in nvme_fc_register_remoteport()
721 INIT_LIST_HEAD(&newrec->ctrl_list); in nvme_fc_register_remoteport()
722 INIT_LIST_HEAD(&newrec->ls_req_list); in nvme_fc_register_remoteport()
723 INIT_LIST_HEAD(&newrec->disc_list); in nvme_fc_register_remoteport()
724 kref_init(&newrec->ref); in nvme_fc_register_remoteport()
725 atomic_set(&newrec->act_ctrl_cnt, 0); in nvme_fc_register_remoteport()
726 spin_lock_init(&newrec->lock); in nvme_fc_register_remoteport()
727 newrec->remoteport.localport = &lport->localport; in nvme_fc_register_remoteport()
728 INIT_LIST_HEAD(&newrec->ls_rcv_list); in nvme_fc_register_remoteport()
729 newrec->dev = lport->dev; in nvme_fc_register_remoteport()
730 newrec->lport = lport; in nvme_fc_register_remoteport()
731 if (lport->ops->remote_priv_sz) in nvme_fc_register_remoteport()
732 newrec->remoteport.private = &newrec[1]; in nvme_fc_register_remoteport()
734 newrec->remoteport.private = NULL; in nvme_fc_register_remoteport()
735 newrec->remoteport.port_role = pinfo->port_role; in nvme_fc_register_remoteport()
736 newrec->remoteport.node_name = pinfo->node_name; in nvme_fc_register_remoteport()
737 newrec->remoteport.port_name = pinfo->port_name; in nvme_fc_register_remoteport()
738 newrec->remoteport.port_id = pinfo->port_id; in nvme_fc_register_remoteport()
739 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_register_remoteport()
740 newrec->remoteport.port_num = idx; in nvme_fc_register_remoteport()
742 INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); in nvme_fc_register_remoteport()
745 list_add_tail(&newrec->endp_list, &lport->endp_list); in nvme_fc_register_remoteport()
750 *portptr = &newrec->remoteport; in nvme_fc_register_remoteport()
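
The discovery side is symmetric: when the fabric reports an NVMe-capable remote port, the LLDD registers it against the localport handle; a dev_loss_tmo of 0 selects the transport default. A sketch under the same naming assumptions as the localport example:

static int lldd_attach_rport(struct nvme_fc_local_port *localport,
			     u64 wwnn, u64 wwpn, u32 fc_id)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_role	= FC_PORT_ROLE_NVME_TARGET |
				  FC_PORT_ROLE_NVME_DISCOVERY,
		.port_id	= fc_id,
		.dev_loss_tmo	= 60,	/* seconds; 0 means default */
	};
	struct nvme_fc_remote_port *rport;

	return nvme_fc_register_remoteport(localport, &pinfo, &rport);
}

If the same wwnn/wwpn reappears while a DELETED rport is still inside its dev_loss window, the transport reuses that rport and resumes each suspended controller (nvme_fc_attach_to_suspended_rport() above) instead of allocating a new structure.
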
770 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_abort_lsops()
772 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { in nvme_fc_abort_lsops()
773 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { in nvme_fc_abort_lsops()
774 lsop->flags |= FCOP_FLAGS_TERMIO; in nvme_fc_abort_lsops()
775 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_abort_lsops()
776 rport->lport->ops->ls_abort(&rport->lport->localport, in nvme_fc_abort_lsops()
777 &rport->remoteport, in nvme_fc_abort_lsops()
778 &lsop->ls_req); in nvme_fc_abort_lsops()
782 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_abort_lsops()
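
__nvme_fc_abort_lsops() is an instance of a standard locking idiom: ls_abort() must not be called under the rport spinlock, so each entry is flagged (FCOP_FLAGS_TERMIO) before the lock is dropped for the callout, and the walk then restarts from the list head (the restart label and goto are elided from this excerpt). The idiom in isolation, as a sketch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	unsigned long flags;
#define ITEM_ABORTED	(1UL << 0)
};

static void do_abort(struct item *e)
{
	/* blocking callout; runs without the lock held */
}

static void abort_all(spinlock_t *lock, struct list_head *head)
{
	struct item *e;
	unsigned long flags;

restart:
	spin_lock_irqsave(lock, flags);
	list_for_each_entry(e, head, node) {
		if (e->flags & ITEM_ABORTED)
			continue;
		e->flags |= ITEM_ABORTED;	/* claim before unlocking */
		spin_unlock_irqrestore(lock, flags);
		do_abort(e);
		goto restart;			/* list may have changed */
	}
	spin_unlock_irqrestore(lock, flags);
}
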
790 dev_info(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
791 "NVME-FC{%d}: controller connectivity lost. Awaiting " in nvme_fc_ctrl_connectivity_loss()
792 "Reconnect", ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
794 switch (ctrl->ctrl.state) { in nvme_fc_ctrl_connectivity_loss()
804 if (nvme_reset_ctrl(&ctrl->ctrl)) { in nvme_fc_ctrl_connectivity_loss()
805 dev_warn(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
806 "NVME-FC{%d}: Couldn't schedule reset.\n", in nvme_fc_ctrl_connectivity_loss()
807 ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
808 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_ctrl_connectivity_loss()
834 /* no action to take - let it delete */ in nvme_fc_ctrl_connectivity_loss()
840 * nvme_fc_unregister_remoteport - transport entry point called by an
848 * (ex: -ENXIO) upon failure.
858 return -EINVAL; in nvme_fc_unregister_remoteport()
860 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_unregister_remoteport()
862 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_unregister_remoteport()
863 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_unregister_remoteport()
864 return -EINVAL; in nvme_fc_unregister_remoteport()
866 portptr->port_state = FC_OBJSTATE_DELETED; in nvme_fc_unregister_remoteport()
868 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); in nvme_fc_unregister_remoteport()
870 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_unregister_remoteport()
872 if (!portptr->dev_loss_tmo) { in nvme_fc_unregister_remoteport()
873 dev_warn(ctrl->ctrl.device, in nvme_fc_unregister_remoteport()
874 "NVME-FC{%d}: controller connectivity lost.\n", in nvme_fc_unregister_remoteport()
875 ctrl->cnum); in nvme_fc_unregister_remoteport()
876 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_unregister_remoteport()
881 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_unregister_remoteport()
885 if (atomic_read(&rport->act_ctrl_cnt) == 0) in nvme_fc_unregister_remoteport()
886 rport->lport->ops->remoteport_delete(portptr); in nvme_fc_unregister_remoteport()
900 * nvme_fc_rescan_remoteport - transport entry point called by an
912 nvme_fc_signal_discovery_scan(rport->lport, rport); in nvme_fc_rescan_remoteport()
923 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
925 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_set_remoteport_devloss()
926 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
927 return -EINVAL; in nvme_fc_set_remoteport_devloss()
931 rport->remoteport.dev_loss_tmo = dev_loss_tmo; in nvme_fc_set_remoteport_devloss()
933 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
940 /* *********************** FC-NVME DMA Handling **************************** */
1005 s->dma_address = 0L; in fc_map_sg()
1007 s->dma_length = s->length; in fc_map_sg()
1028 /* *********************** FC-NVME LS Handling **************************** */
1038 struct nvme_fc_rport *rport = lsop->rport; in __nvme_fc_finish_ls_req()
1039 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvme_fc_finish_ls_req()
1042 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1044 if (!lsop->req_queued) { in __nvme_fc_finish_ls_req()
1045 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1049 list_del(&lsop->lsreq_list); in __nvme_fc_finish_ls_req()
1051 lsop->req_queued = false; in __nvme_fc_finish_ls_req()
1053 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1055 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, in __nvme_fc_finish_ls_req()
1056 (lsreq->rqstlen + lsreq->rsplen), in __nvme_fc_finish_ls_req()
1067 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvme_fc_send_ls_req()
1071 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in __nvme_fc_send_ls_req()
1072 return -ECONNREFUSED; in __nvme_fc_send_ls_req()
1075 return -ESHUTDOWN; in __nvme_fc_send_ls_req()
1077 lsreq->done = done; in __nvme_fc_send_ls_req()
1078 lsop->rport = rport; in __nvme_fc_send_ls_req()
1079 lsop->req_queued = false; in __nvme_fc_send_ls_req()
1080 INIT_LIST_HEAD(&lsop->lsreq_list); in __nvme_fc_send_ls_req()
1081 init_completion(&lsop->ls_done); in __nvme_fc_send_ls_req()
1083 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, in __nvme_fc_send_ls_req()
1084 lsreq->rqstlen + lsreq->rsplen, in __nvme_fc_send_ls_req()
1086 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { in __nvme_fc_send_ls_req()
1087 ret = -EFAULT; in __nvme_fc_send_ls_req()
1090 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; in __nvme_fc_send_ls_req()
1092 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_send_ls_req()
1094 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); in __nvme_fc_send_ls_req()
1096 lsop->req_queued = true; in __nvme_fc_send_ls_req()
1098 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_send_ls_req()
1100 ret = rport->lport->ops->ls_req(&rport->lport->localport, in __nvme_fc_send_ls_req()
1101 &rport->remoteport, lsreq); in __nvme_fc_send_ls_req()
1108 lsop->ls_error = ret; in __nvme_fc_send_ls_req()
1109 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_send_ls_req()
1110 lsop->req_queued = false; in __nvme_fc_send_ls_req()
1111 list_del(&lsop->lsreq_list); in __nvme_fc_send_ls_req()
1112 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_send_ls_req()
1113 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, in __nvme_fc_send_ls_req()
1114 (lsreq->rqstlen + lsreq->rsplen), in __nvme_fc_send_ls_req()
1127 lsop->ls_error = status; in nvme_fc_send_ls_req_done()
1128 complete(&lsop->ls_done); in nvme_fc_send_ls_req_done()
1134 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in nvme_fc_send_ls_req()
1135 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; in nvme_fc_send_ls_req()
1147 wait_for_completion(&lsop->ls_done); in nvme_fc_send_ls_req()
1151 ret = lsop->ls_error; in nvme_fc_send_ls_req()
1158 if (rjt->w0.ls_cmd == FCNVME_LS_RJT) in nvme_fc_send_ls_req()
1159 return -ENXIO; in nvme_fc_send_ls_req()
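
nvme_fc_send_ls_req() layers a synchronous interface over the asynchronous __nvme_fc_send_ls_req(): the done callback records the status in the op and fires a struct completion that the submitting thread waits on, after which the caller checks the response buffer for an FCNVME_LS_RJT. The waiting pattern reduced to a self-contained sketch (a workqueue stands in for the LLDD's asynchronous completion):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct sync_op {
	struct completion done;
	struct work_struct work;	/* models the async LS path */
	int status;
};

static void sync_op_complete(struct work_struct *work)
{
	struct sync_op *op = container_of(work, struct sync_op, work);

	op->status = 0;			/* async side records its result */
	complete(&op->done);		/* ... then wakes the waiter */
}

static int issue_and_wait(struct sync_op *op)
{
	init_completion(&op->done);
	INIT_WORK(&op->work, sync_op_complete);
	schedule_work(&op->work);	/* "issue" the async operation */
	wait_for_completion(&op->done);
	return op->status;
}
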
1187 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_admin_queue()
1189 dev_info(ctrl->ctrl.device, in nvme_fc_connect_admin_queue()
1190 "NVME-FC{%d}: send Create Association failed: ENOMEM\n", in nvme_fc_connect_admin_queue()
1191 ctrl->cnum); in nvme_fc_connect_admin_queue()
1192 ret = -ENOMEM; in nvme_fc_connect_admin_queue()
1198 lsreq = &lsop->ls_req; in nvme_fc_connect_admin_queue()
1199 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_admin_queue()
1200 lsreq->private = &assoc_acc[1]; in nvme_fc_connect_admin_queue()
1202 lsreq->private = NULL; in nvme_fc_connect_admin_queue()
1204 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; in nvme_fc_connect_admin_queue()
1205 assoc_rqst->desc_list_len = in nvme_fc_connect_admin_queue()
1208 assoc_rqst->assoc_cmd.desc_tag = in nvme_fc_connect_admin_queue()
1210 assoc_rqst->assoc_cmd.desc_len = in nvme_fc_connect_admin_queue()
1214 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); in nvme_fc_connect_admin_queue()
1215 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_admin_queue()
1216 /* Linux supports only Dynamic controllers */ in nvme_fc_connect_admin_queue()
1217 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); in nvme_fc_connect_admin_queue()
1218 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); in nvme_fc_connect_admin_queue()
1219 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, in nvme_fc_connect_admin_queue()
1221 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, in nvme_fc_connect_admin_queue()
1224 lsop->queue = queue; in nvme_fc_connect_admin_queue()
1225 lsreq->rqstaddr = assoc_rqst; in nvme_fc_connect_admin_queue()
1226 lsreq->rqstlen = sizeof(*assoc_rqst); in nvme_fc_connect_admin_queue()
1227 lsreq->rspaddr = assoc_acc; in nvme_fc_connect_admin_queue()
1228 lsreq->rsplen = sizeof(*assoc_acc); in nvme_fc_connect_admin_queue()
1229 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; in nvme_fc_connect_admin_queue()
1231 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_admin_queue()
1238 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) in nvme_fc_connect_admin_queue()
1240 else if (assoc_acc->hdr.desc_list_len != in nvme_fc_connect_admin_queue()
1244 else if (assoc_acc->hdr.rqst.desc_tag != in nvme_fc_connect_admin_queue()
1247 else if (assoc_acc->hdr.rqst.desc_len != in nvme_fc_connect_admin_queue()
1250 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) in nvme_fc_connect_admin_queue()
1252 else if (assoc_acc->associd.desc_tag != in nvme_fc_connect_admin_queue()
1255 else if (assoc_acc->associd.desc_len != in nvme_fc_connect_admin_queue()
1259 else if (assoc_acc->connectid.desc_tag != in nvme_fc_connect_admin_queue()
1262 else if (assoc_acc->connectid.desc_len != in nvme_fc_connect_admin_queue()
1267 ret = -EBADF; in nvme_fc_connect_admin_queue()
1268 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1270 queue->qnum, validation_errors[fcret]); in nvme_fc_connect_admin_queue()
1272 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1273 ctrl->association_id = in nvme_fc_connect_admin_queue()
1274 be64_to_cpu(assoc_acc->associd.association_id); in nvme_fc_connect_admin_queue()
1275 queue->connection_id = in nvme_fc_connect_admin_queue()
1276 be64_to_cpu(assoc_acc->connectid.connection_id); in nvme_fc_connect_admin_queue()
1277 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); in nvme_fc_connect_admin_queue()
1278 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1285 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1287 queue->qnum, ret); in nvme_fc_connect_admin_queue()
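
The desc_list_len/desc_len assignments elided above follow one rule: an LS descriptor's desc_len is a big-endian byte count that excludes the descriptor's own tag and length words, which is what the fcnvme_lsdesc_len() helper in include/linux/nvme-fc.h computes (cpu_to_be32(sz - 2 * sizeof(u32))). A sketch of that arithmetic for the Create Association command descriptor:

#include <linux/nvme-fc.h>

static void fill_assoc_lengths(struct fcnvme_ls_cr_assoc_rqst *rqst)
{
	/* the list length covers the whole descriptor... */
	rqst->desc_list_len = cpu_to_be32(
			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
	/* ...while the descriptor's own len drops its 8-byte header */
	rqst->assoc_cmd.desc_len = fcnvme_lsdesc_len(
			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
}
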
1303 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_queue()
1305 dev_info(ctrl->ctrl.device, in nvme_fc_connect_queue()
1306 "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", in nvme_fc_connect_queue()
1307 ctrl->cnum); in nvme_fc_connect_queue()
1308 ret = -ENOMEM; in nvme_fc_connect_queue()
1314 lsreq = &lsop->ls_req; in nvme_fc_connect_queue()
1315 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_queue()
1316 lsreq->private = (void *)&conn_acc[1]; in nvme_fc_connect_queue()
1318 lsreq->private = NULL; in nvme_fc_connect_queue()
1320 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; in nvme_fc_connect_queue()
1321 conn_rqst->desc_list_len = cpu_to_be32( in nvme_fc_connect_queue()
1325 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); in nvme_fc_connect_queue()
1326 conn_rqst->associd.desc_len = in nvme_fc_connect_queue()
1329 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); in nvme_fc_connect_queue()
1330 conn_rqst->connect_cmd.desc_tag = in nvme_fc_connect_queue()
1332 conn_rqst->connect_cmd.desc_len = in nvme_fc_connect_queue()
1335 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); in nvme_fc_connect_queue()
1336 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); in nvme_fc_connect_queue()
1337 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_queue()
1339 lsop->queue = queue; in nvme_fc_connect_queue()
1340 lsreq->rqstaddr = conn_rqst; in nvme_fc_connect_queue()
1341 lsreq->rqstlen = sizeof(*conn_rqst); in nvme_fc_connect_queue()
1342 lsreq->rspaddr = conn_acc; in nvme_fc_connect_queue()
1343 lsreq->rsplen = sizeof(*conn_acc); in nvme_fc_connect_queue()
1344 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; in nvme_fc_connect_queue()
1346 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_queue()
1353 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) in nvme_fc_connect_queue()
1355 else if (conn_acc->hdr.desc_list_len != in nvme_fc_connect_queue()
1358 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) in nvme_fc_connect_queue()
1360 else if (conn_acc->hdr.rqst.desc_len != in nvme_fc_connect_queue()
1363 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) in nvme_fc_connect_queue()
1365 else if (conn_acc->connectid.desc_tag != in nvme_fc_connect_queue()
1368 else if (conn_acc->connectid.desc_len != in nvme_fc_connect_queue()
1373 ret = -EBADF; in nvme_fc_connect_queue()
1374 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1376 queue->qnum, validation_errors[fcret]); in nvme_fc_connect_queue()
1378 queue->connection_id = in nvme_fc_connect_queue()
1379 be64_to_cpu(conn_acc->connectid.connection_id); in nvme_fc_connect_queue()
1380 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); in nvme_fc_connect_queue()
1387 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1389 queue->qnum, ret); in nvme_fc_connect_queue()
1400 /* fc-nvme initiator doesn't care about success or failure of cmd */ in nvme_fc_disconnect_assoc_done()
1406 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1407 * the FC-NVME Association. Terminating the association also
1408 * terminates the FC-NVME connections (per queue, both admin and io
1410 * down, and the related FC-NVME Association ID and Connection IDs
1413 * The behavior of the fc-nvme initiator is such that it's
1416 * connectivity with the fc-nvme target, so you may never get a
1419 * continue on with terminating the association. If the fc-nvme target
1433 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_xmt_disconnect_assoc()
1435 dev_info(ctrl->ctrl.device, in nvme_fc_xmt_disconnect_assoc()
1436 "NVME-FC{%d}: send Disconnect Association " in nvme_fc_xmt_disconnect_assoc()
1438 ctrl->cnum); in nvme_fc_xmt_disconnect_assoc()
1444 lsreq = &lsop->ls_req; in nvme_fc_xmt_disconnect_assoc()
1445 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_xmt_disconnect_assoc()
1446 lsreq->private = (void *)&discon_acc[1]; in nvme_fc_xmt_disconnect_assoc()
1448 lsreq->private = NULL; in nvme_fc_xmt_disconnect_assoc()
1451 ctrl->association_id); in nvme_fc_xmt_disconnect_assoc()
1453 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, in nvme_fc_xmt_disconnect_assoc()
1462 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; in nvme_fc_xmt_ls_rsp_done()
1463 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_xmt_ls_rsp_done()
1464 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_xmt_ls_rsp_done()
1467 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_xmt_ls_rsp_done()
1468 list_del(&lsop->lsrcv_list); in nvme_fc_xmt_ls_rsp_done()
1469 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_xmt_ls_rsp_done()
1471 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp_done()
1472 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp_done()
1473 fc_dma_unmap_single(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp_done()
1474 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp_done()
1484 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_xmt_ls_rsp()
1485 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_xmt_ls_rsp()
1486 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; in nvme_fc_xmt_ls_rsp()
1489 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp()
1490 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp()
1492 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, in nvme_fc_xmt_ls_rsp()
1493 lsop->lsrsp); in nvme_fc_xmt_ls_rsp()
1495 dev_warn(lport->dev, in nvme_fc_xmt_ls_rsp()
1497 w0->ls_cmd, ret); in nvme_fc_xmt_ls_rsp()
1498 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); in nvme_fc_xmt_ls_rsp()
1508 &lsop->rqstbuf->rq_dis_assoc; in nvme_fc_match_disconn_ls()
1511 u64 association_id = be64_to_cpu(rqst->associd.association_id); in nvme_fc_match_disconn_ls()
1514 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_match_disconn_ls()
1516 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_match_disconn_ls()
1519 spin_lock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1520 if (association_id == ctrl->association_id) { in nvme_fc_match_disconn_ls()
1521 oldls = ctrl->rcv_disconn; in nvme_fc_match_disconn_ls()
1522 ctrl->rcv_disconn = lsop; in nvme_fc_match_disconn_ls()
1525 spin_unlock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1532 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_match_disconn_ls()
1536 dev_info(rport->lport->dev, in nvme_fc_match_disconn_ls()
1537 "NVME-FC{%d}: Multiple Disconnect Association " in nvme_fc_match_disconn_ls()
1538 "LS's received\n", ctrl->cnum); in nvme_fc_match_disconn_ls()
1540 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, in nvme_fc_match_disconn_ls()
1541 sizeof(*oldls->rspbuf), in nvme_fc_match_disconn_ls()
1542 rqst->w0.ls_cmd, in nvme_fc_match_disconn_ls()
1559 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_ls_disconnect_assoc()
1561 &lsop->rqstbuf->rq_dis_assoc; in nvme_fc_ls_disconnect_assoc()
1563 &lsop->rspbuf->rsp_dis_assoc; in nvme_fc_ls_disconnect_assoc()
1569 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); in nvme_fc_ls_disconnect_assoc()
1578 dev_info(rport->lport->dev, in nvme_fc_ls_disconnect_assoc()
1581 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvme_fc_ls_disconnect_assoc()
1582 sizeof(*acc), rqst->w0.ls_cmd, in nvme_fc_ls_disconnect_assoc()
1592 lsop->lsrsp->rsplen = sizeof(*acc); in nvme_fc_ls_disconnect_assoc()
1615 * Actual Processing routine for received FC-NVME LS Requests from the LLDD
1622 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; in nvme_fc_handle_ls_rqst()
1625 lsop->lsrsp->nvme_fc_private = lsop; in nvme_fc_handle_ls_rqst()
1626 lsop->lsrsp->rspbuf = lsop->rspbuf; in nvme_fc_handle_ls_rqst()
1627 lsop->lsrsp->rspdma = lsop->rspdma; in nvme_fc_handle_ls_rqst()
1628 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; in nvme_fc_handle_ls_rqst()
1630 lsop->lsrsp->rsplen = 0; in nvme_fc_handle_ls_rqst()
1637 switch (w0->ls_cmd) { in nvme_fc_handle_ls_rqst()
1642 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1643 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1648 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1649 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1653 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1654 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1674 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1675 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { in nvme_fc_handle_ls_rqst_work()
1676 if (lsop->handled) in nvme_fc_handle_ls_rqst_work()
1679 lsop->handled = true; in nvme_fc_handle_ls_rqst_work()
1680 if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_handle_ls_rqst_work()
1681 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1684 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1685 w0 = &lsop->rqstbuf->w0; in nvme_fc_handle_ls_rqst_work()
1686 lsop->lsrsp->rsplen = nvme_fc_format_rjt( in nvme_fc_handle_ls_rqst_work()
1687 lsop->rspbuf, in nvme_fc_handle_ls_rqst_work()
1688 sizeof(*lsop->rspbuf), in nvme_fc_handle_ls_rqst_work()
1689 w0->ls_cmd, in nvme_fc_handle_ls_rqst_work()
1697 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1701 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
1704 * The nvme-fc layer will copy payload to an internal structure for
1725 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_rcv_ls_req()
1734 if (!lport->ops->xmt_ls_rsp) { in nvme_fc_rcv_ls_req()
1735 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1737 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1738 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1739 ret = -EINVAL; in nvme_fc_rcv_ls_req()
1744 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1746 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1747 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1748 ret = -E2BIG; in nvme_fc_rcv_ls_req()
1757 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1759 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1760 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1761 ret = -ENOMEM; in nvme_fc_rcv_ls_req()
1764 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1]; in nvme_fc_rcv_ls_req()
1765 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1]; in nvme_fc_rcv_ls_req()
1767 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, in nvme_fc_rcv_ls_req()
1768 sizeof(*lsop->rspbuf), in nvme_fc_rcv_ls_req()
1770 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { in nvme_fc_rcv_ls_req()
1771 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1773 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1774 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1775 ret = -EFAULT; in nvme_fc_rcv_ls_req()
1779 lsop->rport = rport; in nvme_fc_rcv_ls_req()
1780 lsop->lsrsp = lsrsp; in nvme_fc_rcv_ls_req()
1782 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); in nvme_fc_rcv_ls_req()
1783 lsop->rqstdatalen = lsreqbuf_len; in nvme_fc_rcv_ls_req()
1785 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1786 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_rcv_ls_req()
1787 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1788 ret = -ENOTCONN; in nvme_fc_rcv_ls_req()
1791 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); in nvme_fc_rcv_ls_req()
1792 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1794 schedule_work(&rport->lsrcv_work); in nvme_fc_rcv_ls_req()
1799 fc_dma_unmap_single(lport->dev, lsop->rspdma, in nvme_fc_rcv_ls_req()
1800 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_rcv_ls_req()
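
From the LLDD's perspective this is a one-call handoff: the payload is copied into transport-owned buffers, so the receive buffer may be recycled as soon as the call returns, and the eventual response travels back through the template's xmt_ls_rsp() callback. A hedged sketch of the callsite (lldd_reject_ls_exchange() is hypothetical):

static void lldd_recv_ls_frame(struct nvme_fc_remote_port *remoteport,
			       struct nvmefc_ls_rsp *lsrsp,
			       void *lsreqbuf, u32 lsreqbuf_len)
{
	int ret;

	/* buffer is copied internally; reusable once this returns */
	ret = nvme_fc_rcv_ls_req(remoteport, lsrsp, lsreqbuf, lsreqbuf_len);
	if (ret)
		lldd_reject_ls_exchange(lsrsp, ret);
}
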
1816 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, in __nvme_fc_exit_request()
1817 sizeof(op->rsp_iu), DMA_FROM_DEVICE); in __nvme_fc_exit_request()
1818 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, in __nvme_fc_exit_request()
1819 sizeof(op->cmd_iu), DMA_TO_DEVICE); in __nvme_fc_exit_request()
1821 atomic_set(&op->state, FCPOP_STATE_UNINIT); in __nvme_fc_exit_request()
1830 return __nvme_fc_exit_request(set->driver_data, op); in nvme_fc_exit_request()
1839 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_abort_op()
1840 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); in __nvme_fc_abort_op()
1842 atomic_set(&op->state, opstate); in __nvme_fc_abort_op()
1843 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { in __nvme_fc_abort_op()
1844 op->flags |= FCOP_FLAGS_TERMIO; in __nvme_fc_abort_op()
1845 ctrl->iocnt++; in __nvme_fc_abort_op()
1847 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_abort_op()
1850 return -ECANCELED; in __nvme_fc_abort_op()
1852 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, in __nvme_fc_abort_op()
1853 &ctrl->rport->remoteport, in __nvme_fc_abort_op()
1854 op->queue->lldd_handle, in __nvme_fc_abort_op()
1855 &op->fcp_req); in __nvme_fc_abort_op()
1863 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; in nvme_fc_abort_aen_ops()
1867 if (!(aen_op->flags & FCOP_FLAGS_AEN)) in nvme_fc_abort_aen_ops()
1881 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1882 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && in __nvme_fc_fcpop_chk_teardowns()
1883 op->flags & FCOP_FLAGS_TERMIO) { in __nvme_fc_fcpop_chk_teardowns()
1884 if (!--ctrl->iocnt) in __nvme_fc_fcpop_chk_teardowns()
1885 wake_up(&ctrl->ioabort_wait); in __nvme_fc_fcpop_chk_teardowns()
1887 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1895 struct request *rq = op->rq; in nvme_fc_fcpio_done()
1896 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_fcpio_done()
1897 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_fcpio_done()
1898 struct nvme_fc_queue *queue = op->queue; in nvme_fc_fcpio_done()
1899 struct nvme_completion *cqe = &op->rsp_iu.cqe; in nvme_fc_fcpio_done()
1900 struct nvme_command *sqe = &op->cmd_iu.sqe; in nvme_fc_fcpio_done()
1915 * This affects the FC-NVME implementation in two ways: in nvme_fc_fcpio_done()
1920 * 2) The FC-NVME implementation requires that delivery of in nvme_fc_fcpio_done()
1929 * every field in the cqe - in cases where the FC transport must in nvme_fc_fcpio_done()
1930 * fabricate a CQE, the following fields will not be set as they in nvme_fc_fcpio_done()
1932 * cqe.sqid, cqe.sqhd, cqe.command_id in nvme_fc_fcpio_done()
1938 * Per FC-NVME spec, failure of an individual command requires in nvme_fc_fcpio_done()
1943 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); in nvme_fc_fcpio_done()
1945 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, in nvme_fc_fcpio_done()
1946 sizeof(op->rsp_iu), DMA_FROM_DEVICE); in nvme_fc_fcpio_done()
1950 else if (freq->status) { in nvme_fc_fcpio_done()
1952 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1953 "NVME-FC{%d}: io failed due to lldd error %d\n", in nvme_fc_fcpio_done()
1954 ctrl->cnum, freq->status); in nvme_fc_fcpio_done()
1959 * status, the blk-mq layer can typically be called with the in nvme_fc_fcpio_done()
1960 * non-zero status and the content of the cqe isn't important. in nvme_fc_fcpio_done()
1968 * extract the status and result from the cqe (create it in nvme_fc_fcpio_done()
1972 switch (freq->rcv_rsplen) { in nvme_fc_fcpio_done()
1979 * no payload in the CQE by the transport. in nvme_fc_fcpio_done()
1981 if (freq->transferred_length != in nvme_fc_fcpio_done()
1982 be32_to_cpu(op->cmd_iu.data_len)) { in nvme_fc_fcpio_done()
1984 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1985 "NVME-FC{%d}: io failed due to bad transfer " in nvme_fc_fcpio_done()
1987 ctrl->cnum, freq->transferred_length, in nvme_fc_fcpio_done()
1988 be32_to_cpu(op->cmd_iu.data_len)); in nvme_fc_fcpio_done()
1996 * The ERSP IU contains a full completion with CQE. in nvme_fc_fcpio_done()
1997 * Validate ERSP IU and look at cqe. in nvme_fc_fcpio_done()
1999 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != in nvme_fc_fcpio_done()
2000 (freq->rcv_rsplen / 4) || in nvme_fc_fcpio_done()
2001 be32_to_cpu(op->rsp_iu.xfrd_len) != in nvme_fc_fcpio_done()
2002 freq->transferred_length || in nvme_fc_fcpio_done()
2003 op->rsp_iu.ersp_result || in nvme_fc_fcpio_done()
2004 sqe->common.command_id != cqe->command_id)) { in nvme_fc_fcpio_done()
2006 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2007 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " in nvme_fc_fcpio_done()
2010 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), in nvme_fc_fcpio_done()
2011 be32_to_cpu(op->rsp_iu.xfrd_len), in nvme_fc_fcpio_done()
2012 freq->transferred_length, in nvme_fc_fcpio_done()
2013 op->rsp_iu.ersp_result, in nvme_fc_fcpio_done()
2014 sqe->common.command_id, in nvme_fc_fcpio_done()
2015 cqe->command_id); in nvme_fc_fcpio_done()
2018 result = cqe->result; in nvme_fc_fcpio_done()
2019 status = cqe->status; in nvme_fc_fcpio_done()
2024 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2025 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " in nvme_fc_fcpio_done()
2027 ctrl->cnum, freq->rcv_rsplen); in nvme_fc_fcpio_done()
2034 if (op->flags & FCOP_FLAGS_AEN) { in nvme_fc_fcpio_done()
2035 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); in nvme_fc_fcpio_done()
2037 atomic_set(&op->state, FCPOP_STATE_IDLE); in nvme_fc_fcpio_done()
2038 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ in nvme_fc_fcpio_done()
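
The response handling above reduces to three rules keyed on the received length: zero bytes or a 12-byte all-zeros response means success is implied by the exchange itself (only the transfer count needs checking); a 32-byte ERSP IU carries a real CQE that must be self-consistent; any other length is a protocol violation. A simplified, standalone restatement of those checks (byte order and struct details flattened to host integers):

#include <stdint.h>

#define ZEROS_RSP_LEN	12	/* short all-zeros FCP response */
#define ERSP_IU_LEN	32	/* full ERSP IU with embedded CQE */

/* returns 0: success implied, 1: consume the CQE, -1: treat as error */
static int classify_fcp_rsp(uint32_t rcv_rsplen, uint16_t iu_len_words,
			    uint32_t lldd_xfrd_len, uint32_t cmd_data_len,
			    uint32_t ersp_xfrd_len, uint8_t ersp_result,
			    uint16_t sqe_cid, uint16_t cqe_cid)
{
	switch (rcv_rsplen) {
	case 0:
	case ZEROS_RSP_LEN:
		/* no CQE content; the transfer length must still add up */
		return (lldd_xfrd_len == cmd_data_len) ? 0 : -1;
	case ERSP_IU_LEN:
		/* ERSP header and embedded CQE must be self-consistent */
		if (iu_len_words != rcv_rsplen / 4 ||
		    ersp_xfrd_len != lldd_xfrd_len ||
		    ersp_result || sqe_cid != cqe_cid)
			return -1;
		return 1;
	default:
		return -1;	/* odd NVMe_xRSP length */
	}
}
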
2059 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in __nvme_fc_init_request()
2063 op->fcp_req.cmdaddr = &op->cmd_iu; in __nvme_fc_init_request()
2064 op->fcp_req.cmdlen = sizeof(op->cmd_iu); in __nvme_fc_init_request()
2065 op->fcp_req.rspaddr = &op->rsp_iu; in __nvme_fc_init_request()
2066 op->fcp_req.rsplen = sizeof(op->rsp_iu); in __nvme_fc_init_request()
2067 op->fcp_req.done = nvme_fc_fcpio_done; in __nvme_fc_init_request()
2068 op->ctrl = ctrl; in __nvme_fc_init_request()
2069 op->queue = queue; in __nvme_fc_init_request()
2070 op->rq = rq; in __nvme_fc_init_request()
2071 op->rqno = rqno; in __nvme_fc_init_request()
2073 cmdiu->format_id = NVME_CMD_FORMAT_ID; in __nvme_fc_init_request()
2074 cmdiu->fc_id = NVME_CMD_FC_ID; in __nvme_fc_init_request()
2075 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); in __nvme_fc_init_request()
2076 if (queue->qnum) in __nvme_fc_init_request()
2077 cmdiu->rsv_cat = fccmnd_set_cat_css(0, in __nvme_fc_init_request()
2080 cmdiu->rsv_cat = fccmnd_set_cat_admin(0); in __nvme_fc_init_request()
2082 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2083 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); in __nvme_fc_init_request()
2084 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { in __nvme_fc_init_request()
2085 dev_err(ctrl->dev, in __nvme_fc_init_request()
2086 "FCP Op failed - cmdiu dma mapping failed.\n"); in __nvme_fc_init_request()
2087 ret = -EFAULT; in __nvme_fc_init_request()
2091 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2092 &op->rsp_iu, sizeof(op->rsp_iu), in __nvme_fc_init_request()
2094 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { in __nvme_fc_init_request()
2095 dev_err(ctrl->dev, in __nvme_fc_init_request()
2096 "FCP Op failed - rspiu dma mapping failed.\n"); in __nvme_fc_init_request()
2097 ret = -EFAULT; in __nvme_fc_init_request()
2100 atomic_set(&op->state, FCPOP_STATE_IDLE); in __nvme_fc_init_request()
2109 struct nvme_fc_ctrl *ctrl = set->driver_data; in nvme_fc_init_request()
2111 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_fc_init_request()
2112 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; in nvme_fc_init_request()
2115 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); in nvme_fc_init_request()
2118 op->op.fcp_req.first_sgl = op->sgl; in nvme_fc_init_request()
2119 op->op.fcp_req.private = &op->priv[0]; in nvme_fc_init_request()
2120 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_fc_init_request()
2133 aen_op = ctrl->aen_ops; in nvme_fc_init_aen_ops()
2135 if (ctrl->lport->ops->fcprqst_priv_sz) { in nvme_fc_init_aen_ops()
2136 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, in nvme_fc_init_aen_ops()
2139 return -ENOMEM; in nvme_fc_init_aen_ops()
2142 cmdiu = &aen_op->cmd_iu; in nvme_fc_init_aen_ops()
2143 sqe = &cmdiu->sqe; in nvme_fc_init_aen_ops()
2144 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], in nvme_fc_init_aen_ops()
2152 aen_op->flags = FCOP_FLAGS_AEN; in nvme_fc_init_aen_ops()
2153 aen_op->fcp_req.private = private; in nvme_fc_init_aen_ops()
2156 sqe->common.opcode = nvme_admin_async_event; in nvme_fc_init_aen_ops()
2158 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; in nvme_fc_init_aen_ops()
2169 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_fc_term_aen_ops()
2170 aen_op = ctrl->aen_ops; in nvme_fc_term_aen_ops()
2174 kfree(aen_op->fcp_req.private); in nvme_fc_term_aen_ops()
2175 aen_op->fcp_req.private = NULL; in nvme_fc_term_aen_ops()
2183 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; in __nvme_fc_init_hctx()
2185 hctx->driver_data = queue; in __nvme_fc_init_hctx()
2186 queue->hctx = hctx; in __nvme_fc_init_hctx()
2216 queue = &ctrl->queues[idx]; in nvme_fc_init_queue()
2218 queue->ctrl = ctrl; in nvme_fc_init_queue()
2219 queue->qnum = idx; in nvme_fc_init_queue()
2220 atomic_set(&queue->csn, 0); in nvme_fc_init_queue()
2221 queue->dev = ctrl->dev; in nvme_fc_init_queue()
2224 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_fc_init_queue()
2226 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_fc_init_queue()
2230 * and CQEs and dma map them - mapping their respective entries in nvme_fc_init_queue()
2235 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload in nvme_fc_init_queue()
2251 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) in nvme_fc_free_queue()
2254 clear_bit(NVME_FC_Q_LIVE, &queue->flags); in nvme_fc_free_queue()
2261 queue->connection_id = 0; in nvme_fc_free_queue()
2262 atomic_set(&queue->csn, 0); in nvme_fc_free_queue()
2269 if (ctrl->lport->ops->delete_queue) in __nvme_fc_delete_hw_queue()
2270 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, in __nvme_fc_delete_hw_queue()
2271 queue->lldd_handle); in __nvme_fc_delete_hw_queue()
2272 queue->lldd_handle = NULL; in __nvme_fc_delete_hw_queue()
2280 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_free_io_queues()
2281 nvme_fc_free_queue(&ctrl->queues[i]); in nvme_fc_free_io_queues()
2290 queue->lldd_handle = NULL; in __nvme_fc_create_hw_queue()
2291 if (ctrl->lport->ops->create_queue) in __nvme_fc_create_hw_queue()
2292 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, in __nvme_fc_create_hw_queue()
2293 qidx, qsize, &queue->lldd_handle); in __nvme_fc_create_hw_queue()
2301 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; in nvme_fc_delete_hw_io_queues()
2304 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) in nvme_fc_delete_hw_io_queues()
2311 struct nvme_fc_queue *queue = &ctrl->queues[1]; in nvme_fc_create_hw_io_queues()
2314 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { in nvme_fc_create_hw_io_queues()
2323 for (; i > 0; i--) in nvme_fc_create_hw_io_queues()
2324 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); in nvme_fc_create_hw_io_queues()
2333 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_fc_connect_io_queues()
2334 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, in nvme_fc_connect_io_queues()
2338 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); in nvme_fc_connect_io_queues()
2342 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); in nvme_fc_connect_io_queues()
2353 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_init_io_queues()
2364 if (ctrl->ctrl.tagset) { in nvme_fc_ctrl_free()
2365 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_fc_ctrl_free()
2366 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_fc_ctrl_free()
2370 spin_lock_irqsave(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2371 list_del(&ctrl->ctrl_list); in nvme_fc_ctrl_free()
2372 spin_unlock_irqrestore(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2374 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_fc_ctrl_free()
2375 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_fc_ctrl_free()
2376 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_fc_ctrl_free()
2377 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_fc_ctrl_free()
2379 kfree(ctrl->queues); in nvme_fc_ctrl_free()
2381 put_device(ctrl->dev); in nvme_fc_ctrl_free()
2382 nvme_fc_rport_put(ctrl->rport); in nvme_fc_ctrl_free()
2384 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_ctrl_free()
2385 if (ctrl->ctrl.opts) in nvme_fc_ctrl_free()
2386 nvmf_free_options(ctrl->ctrl.opts); in nvme_fc_ctrl_free()
2393 kref_put(&ctrl->ref, nvme_fc_ctrl_free); in nvme_fc_ctrl_put()
2399 return kref_get_unless_zero(&ctrl->ref); in nvme_fc_ctrl_get()
2403 * All accesses from nvme core layer done - can now free the
2411 WARN_ON(nctrl != &ctrl->ctrl); in nvme_fc_nvme_ctrl_freed()
2464 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2465 nvme_stop_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2466 blk_mq_tagset_busy_iter(&ctrl->tag_set, in __nvme_fc_abort_outstanding_ios()
2467 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2468 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); in __nvme_fc_abort_outstanding_ios()
2470 nvme_start_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2474 * Other transports, which don't have link-level contexts bound in __nvme_fc_abort_outstanding_ios()
2488 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in __nvme_fc_abort_outstanding_ios()
2489 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, in __nvme_fc_abort_outstanding_ios()
2490 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2491 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); in __nvme_fc_abort_outstanding_ios()
2504 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { in nvme_fc_error_recovery()
2506 set_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_error_recovery()
2510 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ in nvme_fc_error_recovery()
2511 if (ctrl->ctrl.state != NVME_CTRL_LIVE) in nvme_fc_error_recovery()
2514 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2515 "NVME-FC{%d}: transport association event: %s\n", in nvme_fc_error_recovery()
2516 ctrl->cnum, errmsg); in nvme_fc_error_recovery()
2517 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2518 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); in nvme_fc_error_recovery()
2520 nvme_reset_ctrl(&ctrl->ctrl); in nvme_fc_error_recovery()
2527 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_timeout()
2528 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_timeout()
2529 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_timeout()
2535 dev_info(ctrl->ctrl.device, in nvme_fc_timeout()
2536 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " in nvme_fc_timeout()
2538 ctrl->cnum, op->queue->qnum, sqe->common.opcode, in nvme_fc_timeout()
2539 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); in nvme_fc_timeout()
2555 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_map_data()
2558 freq->sg_cnt = 0; in nvme_fc_map_data()
2563 freq->sg_table.sgl = freq->first_sgl; in nvme_fc_map_data()
2564 ret = sg_alloc_table_chained(&freq->sg_table, in nvme_fc_map_data()
2565 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, in nvme_fc_map_data()
2568 return -ENOMEM; in nvme_fc_map_data()
2570 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); in nvme_fc_map_data()
2571 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); in nvme_fc_map_data()
2572 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, in nvme_fc_map_data()
2573 op->nents, rq_dma_dir(rq)); in nvme_fc_map_data()
2574 if (unlikely(freq->sg_cnt <= 0)) { in nvme_fc_map_data()
2575 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); in nvme_fc_map_data()
2576 freq->sg_cnt = 0; in nvme_fc_map_data()
2577 return -EFAULT; in nvme_fc_map_data()
2590 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_unmap_data()
2592 if (!freq->sg_cnt) in nvme_fc_unmap_data()
2595 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, in nvme_fc_unmap_data()
2598 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); in nvme_fc_unmap_data()
2600 freq->sg_cnt = 0; in nvme_fc_unmap_data()
2611 * as part of the exchange. The CQE is the last thing for the io,
2613 * sent on the exchange. After the CQE is received, the FC exchange is
2622 * So - while the operation is outstanding to the LLDD, there is a link
2631 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_start_fcp_op()
2632 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_start_fcp_op()
2639 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_start_fcp_op()
2645 /* format the FC-NVME CMD IU and fcp_req */ in nvme_fc_start_fcp_op()
2646 cmdiu->connection_id = cpu_to_be64(queue->connection_id); in nvme_fc_start_fcp_op()
2647 cmdiu->data_len = cpu_to_be32(data_len); in nvme_fc_start_fcp_op()
2650 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; in nvme_fc_start_fcp_op()
2653 cmdiu->flags = FCNVME_CMD_FLAGS_READ; in nvme_fc_start_fcp_op()
2656 cmdiu->flags = 0; in nvme_fc_start_fcp_op()
2659 op->fcp_req.payload_length = data_len; in nvme_fc_start_fcp_op()
2660 op->fcp_req.io_dir = io_dir; in nvme_fc_start_fcp_op()
2661 op->fcp_req.transferred_length = 0; in nvme_fc_start_fcp_op()
2662 op->fcp_req.rcv_rsplen = 0; in nvme_fc_start_fcp_op()
2663 op->fcp_req.status = NVME_SC_SUCCESS; in nvme_fc_start_fcp_op()
2664 op->fcp_req.sqid = cpu_to_le16(queue->qnum); in nvme_fc_start_fcp_op()
2668 * as well as those by FC-NVME spec. in nvme_fc_start_fcp_op()
2670 WARN_ON_ONCE(sqe->common.metadata); in nvme_fc_start_fcp_op()
2671 sqe->common.flags |= NVME_CMD_SGL_METABUF; in nvme_fc_start_fcp_op()
2674 * format SQE DPTR field per FC-NVME rules: in nvme_fc_start_fcp_op()
2676 * subtype=0xA Transport-specific value in nvme_fc_start_fcp_op()
2680 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_fc_start_fcp_op()
2682 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); in nvme_fc_start_fcp_op()
2683 sqe->rw.dptr.sgl.addr = 0; in nvme_fc_start_fcp_op()
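	/*
	 * Worked example of the encoding above (a sketch; constants from
	 * include/linux/nvme.h): NVME_TRANSPORT_SGL_DATA_DESC is 0x5 and
	 * the transport subtype is 0xA, so the type byte is 0x5A. A 4 KB
	 * transfer is therefore described as type=0x5A, length=4096,
	 * addr=0; the payload moves on the FC exchange itself, never
	 * through a host-visible SGL address.
	 */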
2685 if (!(op->flags & FCOP_FLAGS_AEN)) { in nvme_fc_start_fcp_op()
2686 ret = nvme_fc_map_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2688 nvme_cleanup_cmd(op->rq); in nvme_fc_start_fcp_op()
2690 if (ret == -ENOMEM || ret == -EAGAIN) in nvme_fc_start_fcp_op()
2696 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, in nvme_fc_start_fcp_op()
2697 sizeof(op->cmd_iu), DMA_TO_DEVICE); in nvme_fc_start_fcp_op()
2699 atomic_set(&op->state, FCPOP_STATE_ACTIVE); in nvme_fc_start_fcp_op()
2701 if (!(op->flags & FCOP_FLAGS_AEN)) in nvme_fc_start_fcp_op()
2702 blk_mq_start_request(op->rq); in nvme_fc_start_fcp_op()
2704 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); in nvme_fc_start_fcp_op()
2705 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, in nvme_fc_start_fcp_op()
2706 &ctrl->rport->remoteport, in nvme_fc_start_fcp_op()
2707 queue->lldd_handle, &op->fcp_req); in nvme_fc_start_fcp_op()
2713 * no - as the connection won't be live. If it is a command in nvme_fc_start_fcp_op()
2714 * post-connect, it's possible a gap in csn may be created. in nvme_fc_start_fcp_op()
2722 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); in nvme_fc_start_fcp_op()
2725 if (!(op->flags & FCOP_FLAGS_AEN)) { in nvme_fc_start_fcp_op()
2726 nvme_fc_unmap_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2727 nvme_cleanup_cmd(op->rq); in nvme_fc_start_fcp_op()
2732 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && in nvme_fc_start_fcp_op()
2733 ret != -EBUSY) in nvme_fc_start_fcp_op()
2746 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_fc_queue_rq()
2747 struct nvme_fc_queue *queue = hctx->driver_data; in nvme_fc_queue_rq()
2748 struct nvme_fc_ctrl *ctrl = queue->ctrl; in nvme_fc_queue_rq()
2749 struct request *rq = bd->rq; in nvme_fc_queue_rq()
2751 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_queue_rq()
2752 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_queue_rq()
2754 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); in nvme_fc_queue_rq()
2758 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || in nvme_fc_queue_rq()
2759 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_fc_queue_rq()
2760 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_fc_queue_rq()
2768 * as WRITE ZEROES will return a non-zero rq payload_bytes yet in nvme_fc_queue_rq()
2794 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) in nvme_fc_submit_async_event()
2797 aen_op = &ctrl->aen_ops[0]; in nvme_fc_submit_async_event()
2799 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, in nvme_fc_submit_async_event()
2802 dev_err(ctrl->ctrl.device, in nvme_fc_submit_async_event()
2810 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_complete_rq()
2812 atomic_set(&op->state, FCPOP_STATE_IDLE); in nvme_fc_complete_rq()
2813 op->flags &= ~FCOP_FLAGS_TERMIO; in nvme_fc_complete_rq()
2833 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_io_queues()
2837 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), in nvme_fc_create_io_queues()
2838 ctrl->lport->ops->max_hw_queues); in nvme_fc_create_io_queues()
2839 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_create_io_queues()
2841 dev_info(ctrl->ctrl.device, in nvme_fc_create_io_queues()
2846 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_create_io_queues()
2852 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_fc_create_io_queues()
2853 ctrl->tag_set.ops = &nvme_fc_mq_ops; in nvme_fc_create_io_queues()
2854 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_fc_create_io_queues()
2855 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ in nvme_fc_create_io_queues()
2856 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_fc_create_io_queues()
2857 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in nvme_fc_create_io_queues()
2858 ctrl->tag_set.cmd_size = in nvme_fc_create_io_queues()
2860 ctrl->lport->ops->fcprqst_priv_sz); in nvme_fc_create_io_queues()
2861 ctrl->tag_set.driver_data = ctrl; in nvme_fc_create_io_queues()
2862 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; in nvme_fc_create_io_queues()
2863 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; in nvme_fc_create_io_queues()
2865 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); in nvme_fc_create_io_queues()
2869 ctrl->ctrl.tagset = &ctrl->tag_set; in nvme_fc_create_io_queues()
2871 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_fc_create_io_queues()
2872 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_fc_create_io_queues()
2873 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_fc_create_io_queues()
2877 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2881 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2885 ctrl->ioq_live = true; in nvme_fc_create_io_queues()
2892 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_fc_create_io_queues()
2894 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_fc_create_io_queues()
2898 ctrl->ctrl.tagset = NULL; in nvme_fc_create_io_queues()
2906 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_recreate_io_queues()
2907 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; in nvme_fc_recreate_io_queues()
2911 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), in nvme_fc_recreate_io_queues()
2912 ctrl->lport->ops->max_hw_queues); in nvme_fc_recreate_io_queues()
2913 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_recreate_io_queues()
2915 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2921 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2924 return -ENOSPC; in nvme_fc_recreate_io_queues()
2927 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_recreate_io_queues()
2929 if (ctrl->ctrl.queue_count == 1) in nvme_fc_recreate_io_queues()
2932 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2936 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2941 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2944 nvme_wait_freeze(&ctrl->ctrl); in nvme_fc_recreate_io_queues()
2945 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); in nvme_fc_recreate_io_queues()
2946 nvme_unfreeze(&ctrl->ctrl); in nvme_fc_recreate_io_queues()
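	/*
	 * If the reconnect produced a different queue count, the sequence
	 * above is the standard blk-mq resize dance: wait for the queue
	 * freeze to complete so no I/O is in flight, call
	 * blk_mq_update_nr_hw_queues() with the new count, then unfreeze
	 * to resume traffic on the new mapping.
	 */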
2961 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_rport_active_on_lport()
2963 atomic_inc(&lport->act_rport_cnt); in nvme_fc_rport_active_on_lport()
2969 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_rport_inactive_on_lport()
2972 cnt = atomic_dec_return(&lport->act_rport_cnt); in nvme_fc_rport_inactive_on_lport()
2973 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) in nvme_fc_rport_inactive_on_lport()
2974 lport->ops->localport_delete(&lport->localport); in nvme_fc_rport_inactive_on_lport()
2980 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_active_on_rport()
2983 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_ctlr_active_on_rport()
2986 cnt = atomic_inc_return(&rport->act_ctrl_cnt); in nvme_fc_ctlr_active_on_rport()
2996 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_inactive_on_rport()
2997 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_ctlr_inactive_on_rport()
3000 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ in nvme_fc_ctlr_inactive_on_rport()
3002 cnt = atomic_dec_return(&rport->act_ctrl_cnt); in nvme_fc_ctlr_inactive_on_rport()
3004 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) in nvme_fc_ctlr_inactive_on_rport()
3005 lport->ops->remoteport_delete(&rport->remoteport); in nvme_fc_ctlr_inactive_on_rport()
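Both helpers above, like their lport counterparts, implement a last-one-out rule: an activity count is raised while a child object is active, and the decrement that reaches zero on an already-DELETED parent is what finally invokes the LLDD's delete callback. A self-contained illustration in plain C11 (names and values are hypothetical):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int act_cnt = 2;		/* two active children */
	static bool parent_deleted = true;	/* parent already unregistered */

	static void child_inactive(void)
	{
		/* atomic_fetch_sub returns the old value; old - 1 is the new count */
		if (atomic_fetch_sub(&act_cnt, 1) - 1 == 0 && parent_deleted)
			printf("last child gone: invoke delete callback\n");
	}

	int main(void)
	{
		child_inactive();	/* count 2 -> 1: nothing happens */
		child_inactive();	/* count 1 -> 0: callback fires   */
		return 0;
	}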
3019 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_association()
3025 ++ctrl->ctrl.nr_reconnects; in nvme_fc_create_association()
3027 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_association()
3028 return -ENODEV; in nvme_fc_create_association()
3031 return -ENOTUNIQ; in nvme_fc_create_association()
3033 dev_info(ctrl->ctrl.device, in nvme_fc_create_association()
3034 "NVME-FC{%d}: create association : host wwpn 0x%016llx " in nvme_fc_create_association()
3036 ctrl->cnum, ctrl->lport->localport.port_name, in nvme_fc_create_association()
3037 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); in nvme_fc_create_association()
3039 clear_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_create_association()
3045 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, in nvme_fc_create_association()
3050 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], in nvme_fc_create_association()
3055 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_fc_create_association()
3059 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in nvme_fc_create_association()
3064 * TODO: add code to check if ctrl attributes changed from in nvme_fc_create_association()
3068 ret = nvme_enable_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3069 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3072 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; in nvme_fc_create_association()
3073 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << in nvme_fc_create_association()
3074 (ilog2(SZ_4K) - 9); in nvme_fc_create_association()
3076 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_fc_create_association()
3078 ret = nvme_init_identify(&ctrl->ctrl); in nvme_fc_create_association()
3079 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3084 /* FC-NVME does not have other data in the capsule */ in nvme_fc_create_association()
3085 if (ctrl->ctrl.icdoff) { in nvme_fc_create_association()
3086 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", in nvme_fc_create_association()
3087 ctrl->ctrl.icdoff); in nvme_fc_create_association()
3091 /* FC-NVME supports normal SGL Data Block Descriptors */ in nvme_fc_create_association()
3093 if (opts->queue_size > ctrl->ctrl.maxcmd) { in nvme_fc_create_association()
3095 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3098 opts->queue_size, ctrl->ctrl.maxcmd); in nvme_fc_create_association()
3099 opts->queue_size = ctrl->ctrl.maxcmd; in nvme_fc_create_association()
3102 if (opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_fc_create_association()
3104 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3107 opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_fc_create_association()
3108 opts->queue_size = ctrl->ctrl.sqsize + 1; in nvme_fc_create_association()
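	/*
	 * Worked example with hypothetical values: a user queue_size of 128
	 * against maxcmd = 64 is first cut to 64; if sqsize is 31 (32 usable
	 * slots) it is cut again to sqsize + 1 = 32. The effective depth is
	 * therefore min(requested, maxcmd, sqsize + 1).
	 */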
3119 if (ctrl->ctrl.queue_count > 1) { in nvme_fc_create_association()
3120 if (!ctrl->ioq_live) in nvme_fc_create_association()
3125 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3128 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_fc_create_association()
3130 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_create_association()
3133 nvme_start_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3140 /* send a Disconnect(association) LS to fc-nvme target */ in nvme_fc_create_association()
3142 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_create_association()
3143 ctrl->association_id = 0; in nvme_fc_create_association()
3144 disls = ctrl->rcv_disconn; in nvme_fc_create_association()
3145 ctrl->rcv_disconn = NULL; in nvme_fc_create_association()
3146 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_create_association()
3150 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_create_association()
3152 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_create_association()
3153 clear_bit(ASSOC_ACTIVE, &ctrl->flags); in nvme_fc_create_association()
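	/*
	 * Bring-up order, as reconstructed from the fragments above:
	 *  1. create the admin hw queue, FC-connect it, NVMe-connect it
	 *     (nvmf_connect_admin_queue), then mark it NVME_FC_Q_LIVE;
	 *  2. nvme_enable_ctrl(); max_hw_sectors follows from the LLDD's
	 *     SGL limit as max_segments << (ilog2(SZ_4K) - 9), i.e. one
	 *     4 KiB page per segment expressed in 512-byte sectors;
	 *  3. nvme_init_identify(); a nonzero icdoff is rejected since
	 *     FC-NVME carries no extra data in the command capsule;
	 *  4. clamp opts->queue_size to maxcmd and to sqsize + 1;
	 *  5. create (first time) or re-create the I/O queues and move the
	 *     controller to LIVE.
	 * On failure the out_* labels unwind in reverse, sending a
	 * Disconnect(association) LS when an association_id was assigned.
	 */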
3172 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_delete_association()
3175 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3176 set_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3177 ctrl->iocnt = 0; in nvme_fc_delete_association()
3178 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3186 spin_lock_irq(&ctrl->lock); in nvme_fc_delete_association()
3187 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); in nvme_fc_delete_association()
3188 clear_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3189 spin_unlock_irq(&ctrl->lock); in nvme_fc_delete_association()
3194 * send a Disconnect(association) LS to fc-nvme target in nvme_fc_delete_association()
3199 if (ctrl->association_id) in nvme_fc_delete_association()
3202 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3203 ctrl->association_id = 0; in nvme_fc_delete_association()
3204 disls = ctrl->rcv_disconn; in nvme_fc_delete_association()
3205 ctrl->rcv_disconn = NULL; in nvme_fc_delete_association()
3206 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3214 if (ctrl->ctrl.tagset) { in nvme_fc_delete_association()
3219 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_delete_association()
3220 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_delete_association()
3222 /* re-enable the admin_q so anything new can fast fail */ in nvme_fc_delete_association()
3223 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_fc_delete_association()
3226 nvme_start_queues(&ctrl->ctrl); in nvme_fc_delete_association()
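	/*
	 * Teardown synchronization: FCCTRL_TERMIO marks the window in which
	 * outstanding FCP ops are being aborted; each terminated op drops
	 * ctrl->iocnt, and the deleting thread sleeps in
	 * wait_event_lock_irq() until iocnt hits zero before clearing the
	 * flag, sending Disconnect(association), and freeing queue 0. The
	 * admin_q is unquiesced afterwards so new submissions fast-fail
	 * instead of hanging.
	 */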
3236 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_delete_ctrl()
3247 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_reconnect_or_delete()
3248 struct nvme_fc_remote_port *portptr = &rport->remoteport; in nvme_fc_reconnect_or_delete()
3249 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; in nvme_fc_reconnect_or_delete()
3252 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) in nvme_fc_reconnect_or_delete()
3255 if (portptr->port_state == FC_OBJSTATE_ONLINE) in nvme_fc_reconnect_or_delete()
3256 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3257 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", in nvme_fc_reconnect_or_delete()
3258 ctrl->cnum, status); in nvme_fc_reconnect_or_delete()
3259 else if (time_after_eq(jiffies, rport->dev_loss_end)) in nvme_fc_reconnect_or_delete()
3262 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_fc_reconnect_or_delete()
3263 if (portptr->port_state == FC_OBJSTATE_ONLINE) in nvme_fc_reconnect_or_delete()
3264 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3265 "NVME-FC{%d}: Reconnect attempt in %ld " in nvme_fc_reconnect_or_delete()
3267 ctrl->cnum, recon_delay / HZ); in nvme_fc_reconnect_or_delete()
3268 else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) in nvme_fc_reconnect_or_delete()
3269 recon_delay = rport->dev_loss_end - jiffies; in nvme_fc_reconnect_or_delete()
3271 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); in nvme_fc_reconnect_or_delete()
3273 if (portptr->port_state == FC_OBJSTATE_ONLINE) in nvme_fc_reconnect_or_delete()
3274 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3275 "NVME-FC{%d}: Max reconnect attempts (%d) " in nvme_fc_reconnect_or_delete()
3277 ctrl->cnum, ctrl->ctrl.nr_reconnects); in nvme_fc_reconnect_or_delete()
3279 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3280 "NVME-FC{%d}: dev_loss_tmo (%d) expired " in nvme_fc_reconnect_or_delete()
3282 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, in nvme_fc_reconnect_or_delete()
3283 (ctrl->ctrl.opts->max_reconnects * in nvme_fc_reconnect_or_delete()
3284 ctrl->ctrl.opts->reconnect_delay))); in nvme_fc_reconnect_or_delete()
3285 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); in nvme_fc_reconnect_or_delete()
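	/*
	 * Decision summary: only a controller still in CONNECTING is
	 * eligible. If nvmf_should_reconnect() allows another attempt, the
	 * connect work is re-queued, with the delay clamped so it never
	 * extends past rport->dev_loss_end. Otherwise the controller is
	 * deleted, with the log message distinguishing "out of retries"
	 * from "dev_loss_tmo expired".
	 */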
3295 nvme_stop_ctrl(&ctrl->ctrl); in nvme_fc_reset_ctrl_work()
3300 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) in nvme_fc_reset_ctrl_work()
3301 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3302 "NVME-FC{%d}: error_recovery: Couldn't change state " in nvme_fc_reset_ctrl_work()
3303 "to CONNECTING\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3305 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_reset_ctrl_work()
3306 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_reset_ctrl_work()
3307 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3308 "NVME-FC{%d}: failed to schedule connect " in nvme_fc_reset_ctrl_work()
3309 "after reset\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3311 flush_delayed_work(&ctrl->connect_work); in nvme_fc_reset_ctrl_work()
3314 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); in nvme_fc_reset_ctrl_work()
3345 dev_info(ctrl->ctrl.device, in nvme_fc_connect_ctrl_work()
3346 "NVME-FC{%d}: controller connect complete\n", in nvme_fc_connect_ctrl_work()
3347 ctrl->cnum); in nvme_fc_connect_ctrl_work()
3377 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_existing_controller()
3378 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_existing_controller()
3379 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); in nvme_fc_existing_controller()
3383 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_existing_controller()
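	/*
	 * Duplicate detection: the rport's controller list is walked under
	 * rport->lock and each entry is compared with
	 * nvmf_ctlr_matches_baseopts(); nvme_fc_init_ctrl() below uses the
	 * result to fail a second identical connect with -EALREADY unless
	 * the user passed duplicate_connect.
	 */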
3396 if (!(rport->remoteport.port_role & in nvme_fc_init_ctrl()
3398 ret = -EBADR; in nvme_fc_init_ctrl()
3402 if (!opts->duplicate_connect && in nvme_fc_init_ctrl()
3404 ret = -EALREADY; in nvme_fc_init_ctrl()
3410 ret = -ENOMEM; in nvme_fc_init_ctrl()
3416 ret = -ENOSPC; in nvme_fc_init_ctrl()
3424 if (opts->max_reconnects != -1 && in nvme_fc_init_ctrl()
3425 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && in nvme_fc_init_ctrl()
3426 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { in nvme_fc_init_ctrl()
3427 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; in nvme_fc_init_ctrl()
3428 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; in nvme_fc_init_ctrl()
3429 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, in nvme_fc_init_ctrl()
3430 opts->reconnect_delay); in nvme_fc_init_ctrl()
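	/*
	 * Worked example, assuming NVMF_DEF_RECONNECT_DELAY is 10s and
	 * NVME_FC_DEFAULT_RECONNECT_TMO is 2s (the values in contemporary
	 * kernels): a user ctrl_loss_tmo of 600s arrives as
	 * max_reconnects = 60 at 10s apart; the block above shortens the
	 * delay to 2s and recomputes max_reconnects = DIV_ROUND_UP(600, 2)
	 * = 300, preserving the overall loss window while probing the
	 * fabric far more often.
	 */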
3433 ctrl->ctrl.opts = opts; in nvme_fc_init_ctrl()
3434 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_init_ctrl()
3435 if (lport->dev) in nvme_fc_init_ctrl()
3436 ctrl->ctrl.numa_node = dev_to_node(lport->dev); in nvme_fc_init_ctrl()
3438 ctrl->ctrl.numa_node = NUMA_NO_NODE; in nvme_fc_init_ctrl()
3439 INIT_LIST_HEAD(&ctrl->ctrl_list); in nvme_fc_init_ctrl()
3440 ctrl->lport = lport; in nvme_fc_init_ctrl()
3441 ctrl->rport = rport; in nvme_fc_init_ctrl()
3442 ctrl->dev = lport->dev; in nvme_fc_init_ctrl()
3443 ctrl->cnum = idx; in nvme_fc_init_ctrl()
3444 ctrl->ioq_live = false; in nvme_fc_init_ctrl()
3445 init_waitqueue_head(&ctrl->ioabort_wait); in nvme_fc_init_ctrl()
3447 get_device(ctrl->dev); in nvme_fc_init_ctrl()
3448 kref_init(&ctrl->ref); in nvme_fc_init_ctrl()
3450 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); in nvme_fc_init_ctrl()
3451 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); in nvme_fc_init_ctrl()
3452 spin_lock_init(&ctrl->lock); in nvme_fc_init_ctrl()
3455 ctrl->ctrl.queue_count = min_t(unsigned int, in nvme_fc_init_ctrl()
3456 opts->nr_io_queues, in nvme_fc_init_ctrl()
3457 lport->ops->max_hw_queues); in nvme_fc_init_ctrl()
3458 ctrl->ctrl.queue_count++; /* +1 for admin queue */ in nvme_fc_init_ctrl()
3460 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_init_ctrl()
3461 ctrl->ctrl.kato = opts->kato; in nvme_fc_init_ctrl()
3462 ctrl->ctrl.cntlid = 0xffff; in nvme_fc_init_ctrl()
3464 ret = -ENOMEM; in nvme_fc_init_ctrl()
3465 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, in nvme_fc_init_ctrl()
3467 if (!ctrl->queues) in nvme_fc_init_ctrl()
3472 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); in nvme_fc_init_ctrl()
3473 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; in nvme_fc_init_ctrl()
3474 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_fc_init_ctrl()
3475 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ in nvme_fc_init_ctrl()
3476 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_fc_init_ctrl()
3477 ctrl->admin_tag_set.cmd_size = in nvme_fc_init_ctrl()
3479 ctrl->lport->ops->fcprqst_priv_sz); in nvme_fc_init_ctrl()
3480 ctrl->admin_tag_set.driver_data = ctrl; in nvme_fc_init_ctrl()
3481 ctrl->admin_tag_set.nr_hw_queues = 1; in nvme_fc_init_ctrl()
3482 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; in nvme_fc_init_ctrl()
3483 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; in nvme_fc_init_ctrl()
3485 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3488 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; in nvme_fc_init_ctrl()
3490 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3491 if (IS_ERR(ctrl->ctrl.fabrics_q)) { in nvme_fc_init_ctrl()
3492 ret = PTR_ERR(ctrl->ctrl.fabrics_q); in nvme_fc_init_ctrl()
3496 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3497 if (IS_ERR(ctrl->ctrl.admin_q)) { in nvme_fc_init_ctrl()
3498 ret = PTR_ERR(ctrl->ctrl.admin_q); in nvme_fc_init_ctrl()
3509 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); in nvme_fc_init_ctrl()
3515 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_init_ctrl()
3516 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); in nvme_fc_init_ctrl()
3517 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_init_ctrl()
3519 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || in nvme_fc_init_ctrl()
3520 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_fc_init_ctrl()
3521 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3522 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); in nvme_fc_init_ctrl()
3526 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_init_ctrl()
3527 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3528 "NVME-FC{%d}: failed to schedule initial connect\n", in nvme_fc_init_ctrl()
3529 ctrl->cnum); in nvme_fc_init_ctrl()
3533 flush_delayed_work(&ctrl->connect_work); in nvme_fc_init_ctrl()
3535 dev_info(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3536 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", in nvme_fc_init_ctrl()
3537 ctrl->cnum, ctrl->ctrl.opts->subsysnqn); in nvme_fc_init_ctrl()
3539 return &ctrl->ctrl; in nvme_fc_init_ctrl()
3542 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); in nvme_fc_init_ctrl()
3543 cancel_work_sync(&ctrl->ctrl.reset_work); in nvme_fc_init_ctrl()
3544 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_init_ctrl()
3546 ctrl->ctrl.opts = NULL; in nvme_fc_init_ctrl()
3549 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3552 nvme_put_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3563 return ERR_PTR(-EIO); in nvme_fc_init_ctrl()
3566 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_fc_init_ctrl()
3568 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_fc_init_ctrl()
3570 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3572 kfree(ctrl->queues); in nvme_fc_init_ctrl()
3574 put_device(ctrl->dev); in nvme_fc_init_ctrl()
3575 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_init_ctrl()
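	/*
	 * The labels above mirror setup in reverse: connect/reset work is
	 * cancelled, the ctrl is uninitialized and its reference dropped,
	 * then the admin and fabrics queues, the admin tag set, the queue
	 * array, the device reference and the cnum IDA slot are released
	 * in that order. Note the ERR_PTR(-EIO) on the late path: once
	 * nvme_init_ctrl() has succeeded, teardown happens through
	 * nvme_put_ctrl() rather than through direct frees.
	 */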
3595 return -EINVAL; in __nvme_fc_parse_u64()
3610 substring_t wwn = { name, &name[sizeof(name)-1] }; in nvme_fc_parse_traddr()
3615 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && in nvme_fc_parse_traddr()
3617 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { in nvme_fc_parse_traddr()
3622 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && in nvme_fc_parse_traddr()
3624 "pn-", NVME_FC_TRADDR_NNLEN))) { in nvme_fc_parse_traddr()
3635 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) in nvme_fc_parse_traddr()
3639 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) in nvme_fc_parse_traddr()
3646 return -EINVAL; in nvme_fc_parse_traddr()
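nvme_fc_parse_traddr() accepts a transport address in either the long "nn-0x<16 hex>:pn-0x<16 hex>" or the abbreviated "nn-<16 hex>:pn-<16 hex>" spelling and extracts the two 64-bit WWNs. A self-contained sketch of the long form in plain C (the WWN values are made up):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		const char *traddr = "nn-0x20000090fa942779:pn-0x10000090fa942779";
		uint64_t nn, pn;

		/* read exactly up to 16 hex digits for each WWN */
		if (sscanf(traddr, "nn-0x%16" SCNx64 ":pn-0x%16" SCNx64,
			   &nn, &pn) != 2)
			return 1;

		printf("node name 0x%016" PRIx64 ", port name 0x%016" PRIx64 "\n",
		       nn, pn);
		return 0;
	}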
3660 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); in nvme_fc_create_ctrl()
3662 return ERR_PTR(-EINVAL); in nvme_fc_create_ctrl()
3664 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); in nvme_fc_create_ctrl()
3666 return ERR_PTR(-EINVAL); in nvme_fc_create_ctrl()
3671 if (lport->localport.node_name != laddr.nn || in nvme_fc_create_ctrl()
3672 lport->localport.port_name != laddr.pn || in nvme_fc_create_ctrl()
3673 lport->localport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_ctrl()
3676 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_create_ctrl()
3677 if (rport->remoteport.node_name != raddr.nn || in nvme_fc_create_ctrl()
3678 rport->remoteport.port_name != raddr.pn || in nvme_fc_create_ctrl()
3679 rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_ctrl()
3696 pr_warn("%s: %s - %s combination not found\n", in nvme_fc_create_ctrl()
3697 __func__, opts->traddr, opts->host_traddr); in nvme_fc_create_ctrl()
3698 return ERR_PTR(-ENOENT); in nvme_fc_create_ctrl()
3725 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_nvme_discovery_store()
3747 if (list_empty(&rport->disc_list)) in nvme_fc_nvme_discovery_store()
3748 list_add_tail(&rport->disc_list, in nvme_fc_nvme_discovery_store()
3757 list_del_init(&rport->disc_list); in nvme_fc_nvme_discovery_store()
3760 lport = rport->lport; in nvme_fc_nvme_discovery_store()
3800 return -ENOMEM; in nvme_fc_init_module()
3805 * the FC-isms that are currently under scsi and now being in nvme_fc_init_module()
3810 * As we need something to post FC-specific udev events to, in nvme_fc_init_module()
3823 * Create a device for the FC-centric udev events in nvme_fc_init_module()
3854 spin_lock(&rport->lock); in nvme_fc_delete_controllers()
3855 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_delete_controllers()
3856 dev_warn(ctrl->ctrl.device, in nvme_fc_delete_controllers()
3857 "NVME-FC{%d}: transport unloading: deleting ctrl\n", in nvme_fc_delete_controllers()
3858 ctrl->cnum); in nvme_fc_delete_controllers()
3859 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_delete_controllers()
3861 spin_unlock(&rport->lock); in nvme_fc_delete_controllers()
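	/*
	 * Module unload path: every controller hanging off the rport is
	 * queued for deletion under rport->lock, and the cleanup below
	 * walks each lport's endp_list so every remote port is covered
	 * before the transport goes away.
	 */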
3871 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_cleanup_for_unload()