Lines matching the search query: +full:supports +full:- +full:cqe (nvmet FC target transport)

1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/blk-mq.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
60 /* desired maximum for a single sequence - if sg list allows it */
93 struct list_head fcp_list; /* tgtport->fcp_list */
176 return (iodptr - iodptr->tgtport->iod); in nvmet_fc_iodnum()
182 return (fodptr - fodptr->queue->fod); in nvmet_fc_fodnum()
198 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
203 return (assoc->association_id | qid); in nvmet_fc_makeconnid()
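The connection ID handed back to the host is just the 64-bit association ID with the queue ID OR'd into its low-order bits, so either half can be recovered with NVMET_FC_QUEUEID_MASK. A minimal sketch of the inverse decode, with illustrative helper names (the driver defines similar helpers of its own nearby):

/* Sketch only: split a connection ID back into its two parts using the
 * NVMET_FC_QUEUEID_MASK definition shown above. Helper names are illustrative.
 */
static u64 example_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static u16 example_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}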
257 /* *********************** FC-NVME DMA Handling **************************** */
322 s->dma_address = 0L; in fc_map_sg()
324 s->dma_length = s->length; in fc_map_sg()
346 /* ********************** FC-NVME LS XMT Handling ************************* */
352 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; in __nvmet_fc_finish_ls_req()
353 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_finish_ls_req()
356 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
358 if (!lsop->req_queued) { in __nvmet_fc_finish_ls_req()
359 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
363 list_del(&lsop->lsreq_list); in __nvmet_fc_finish_ls_req()
365 lsop->req_queued = false; in __nvmet_fc_finish_ls_req()
367 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
369 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_finish_ls_req()
370 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_finish_ls_req()
381 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_send_ls_req()
385 if (!tgtport->ops->ls_req) in __nvmet_fc_send_ls_req()
386 return -EOPNOTSUPP; in __nvmet_fc_send_ls_req()
389 return -ESHUTDOWN; in __nvmet_fc_send_ls_req()
391 lsreq->done = done; in __nvmet_fc_send_ls_req()
392 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
393 INIT_LIST_HEAD(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
395 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, in __nvmet_fc_send_ls_req()
396 lsreq->rqstlen + lsreq->rsplen, in __nvmet_fc_send_ls_req()
398 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { in __nvmet_fc_send_ls_req()
399 ret = -EFAULT; in __nvmet_fc_send_ls_req()
402 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; in __nvmet_fc_send_ls_req()
404 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
406 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); in __nvmet_fc_send_ls_req()
408 lsop->req_queued = true; in __nvmet_fc_send_ls_req()
410 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
412 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, in __nvmet_fc_send_ls_req()
420 lsop->ls_error = ret; in __nvmet_fc_send_ls_req()
421 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
422 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
423 list_del(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
424 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
425 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_send_ls_req()
426 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_send_ls_req()
452 /* fc-nvme target doesn't care about success or failure of cmd */ in nvmet_fc_disconnect_assoc_done()
458 * This routine sends a FC-NVME LS to disconnect (aka terminate)
459 * the FC-NVME Association. Terminating the association also
460 * terminates the FC-NVME connections (per queue, both admin and io
462 * down, and the related FC-NVME Association ID and Connection IDs
465 * The behavior of the fc-nvme target is such that it's
468 * connectivity with the fc-nvme host, so the target may never get a
471 * continue on with terminating the association. If the fc-nvme host
477 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_xmt_disconnect_assoc()
489 if (!tgtport->ops->ls_req || !assoc->hostport || in nvmet_fc_xmt_disconnect_assoc()
490 assoc->hostport->invalid) in nvmet_fc_xmt_disconnect_assoc()
495 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvmet_fc_xmt_disconnect_assoc()
497 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
499 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_xmt_disconnect_assoc()
505 lsreq = &lsop->ls_req; in nvmet_fc_xmt_disconnect_assoc()
506 if (tgtport->ops->lsrqst_priv_sz) in nvmet_fc_xmt_disconnect_assoc()
507 lsreq->private = (void *)&discon_acc[1]; in nvmet_fc_xmt_disconnect_assoc()
509 lsreq->private = NULL; in nvmet_fc_xmt_disconnect_assoc()
511 lsop->tgtport = tgtport; in nvmet_fc_xmt_disconnect_assoc()
512 lsop->hosthandle = assoc->hostport->hosthandle; in nvmet_fc_xmt_disconnect_assoc()
515 assoc->association_id); in nvmet_fc_xmt_disconnect_assoc()
520 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
522 tgtport->fc_target_port.port_num, assoc->a_id, ret); in nvmet_fc_xmt_disconnect_assoc()
528 /* *********************** FC-NVME Port Management ************************ */
540 return -ENOMEM; in nvmet_fc_alloc_ls_iodlist()
542 tgtport->iod = iod; in nvmet_fc_alloc_ls_iodlist()
545 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); in nvmet_fc_alloc_ls_iodlist()
546 iod->tgtport = tgtport; in nvmet_fc_alloc_ls_iodlist()
547 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
549 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + in nvmet_fc_alloc_ls_iodlist()
552 if (!iod->rqstbuf) in nvmet_fc_alloc_ls_iodlist()
555 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; in nvmet_fc_alloc_ls_iodlist()
557 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, in nvmet_fc_alloc_ls_iodlist()
558 sizeof(*iod->rspbuf), in nvmet_fc_alloc_ls_iodlist()
560 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) in nvmet_fc_alloc_ls_iodlist()
567 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
568 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
569 for (iod--, i--; i >= 0; iod--, i--) { in nvmet_fc_alloc_ls_iodlist()
570 fc_dma_unmap_single(tgtport->dev, iod->rspdma, in nvmet_fc_alloc_ls_iodlist()
571 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_alloc_ls_iodlist()
572 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
573 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
578 return -EFAULT; in nvmet_fc_alloc_ls_iodlist()
584 struct nvmet_fc_ls_iod *iod = tgtport->iod; in nvmet_fc_free_ls_iodlist()
588 fc_dma_unmap_single(tgtport->dev, in nvmet_fc_free_ls_iodlist()
589 iod->rspdma, sizeof(*iod->rspbuf), in nvmet_fc_free_ls_iodlist()
591 kfree(iod->rqstbuf); in nvmet_fc_free_ls_iodlist()
592 list_del(&iod->ls_rcv_list); in nvmet_fc_free_ls_iodlist()
594 kfree(tgtport->iod); in nvmet_fc_free_ls_iodlist()
603 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
604 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, in nvmet_fc_alloc_ls_iod()
607 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); in nvmet_fc_alloc_ls_iod()
608 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
619 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
620 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_free_ls_iod()
621 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
628 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_prep_fcp_iodlist()
631 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
632 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); in nvmet_fc_prep_fcp_iodlist()
633 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
634 fod->queue = queue; in nvmet_fc_prep_fcp_iodlist()
635 fod->active = false; in nvmet_fc_prep_fcp_iodlist()
636 fod->abort = false; in nvmet_fc_prep_fcp_iodlist()
637 fod->aborted = false; in nvmet_fc_prep_fcp_iodlist()
638 fod->fcpreq = NULL; in nvmet_fc_prep_fcp_iodlist()
639 list_add_tail(&fod->fcp_list, &queue->fod_list); in nvmet_fc_prep_fcp_iodlist()
640 spin_lock_init(&fod->flock); in nvmet_fc_prep_fcp_iodlist()
642 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
643 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_iodlist()
644 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
645 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
646 for (fod--, i--; i >= 0; fod--, i--) { in nvmet_fc_prep_fcp_iodlist()
647 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
648 sizeof(fod->rspiubuf), in nvmet_fc_prep_fcp_iodlist()
650 fod->rspdma = 0L; in nvmet_fc_prep_fcp_iodlist()
651 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
663 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_destroy_fcp_iodlist()
666 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
667 if (fod->rspdma) in nvmet_fc_destroy_fcp_iodlist()
668 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
669 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_destroy_fcp_iodlist()
678 lockdep_assert_held(&queue->qlock); in nvmet_fc_alloc_fcp_iod()
680 fod = list_first_entry_or_null(&queue->fod_list, in nvmet_fc_alloc_fcp_iod()
683 list_del(&fod->fcp_list); in nvmet_fc_alloc_fcp_iod()
684 fod->active = true; in nvmet_fc_alloc_fcp_iod()
700 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_queue_fcp_req()
706 fcpreq->hwqid = queue->qid ? in nvmet_fc_queue_fcp_req()
707 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; in nvmet_fc_queue_fcp_req()
719 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
727 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_free_fcp_iod()
728 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod()
732 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
733 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_free_fcp_iod()
735 fcpreq->nvmet_fc_private = NULL; in nvmet_fc_free_fcp_iod()
737 fod->active = false; in nvmet_fc_free_fcp_iod()
738 fod->abort = false; in nvmet_fc_free_fcp_iod()
739 fod->aborted = false; in nvmet_fc_free_fcp_iod()
740 fod->writedataactive = false; in nvmet_fc_free_fcp_iod()
741 fod->fcpreq = NULL; in nvmet_fc_free_fcp_iod()
743 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
748 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
749 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_free_fcp_iod()
752 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); in nvmet_fc_free_fcp_iod()
753 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
757 /* Re-use the fod for the next pending cmd that was deferred */ in nvmet_fc_free_fcp_iod()
758 list_del(&deferfcp->req_list); in nvmet_fc_free_fcp_iod()
760 fcpreq = deferfcp->fcp_req; in nvmet_fc_free_fcp_iod()
763 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); in nvmet_fc_free_fcp_iod()
765 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
768 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); in nvmet_fc_free_fcp_iod()
771 fcpreq->rspaddr = NULL; in nvmet_fc_free_fcp_iod()
772 fcpreq->rsplen = 0; in nvmet_fc_free_fcp_iod()
773 fcpreq->nvmet_fc_private = fod; in nvmet_fc_free_fcp_iod()
774 fod->fcpreq = fcpreq; in nvmet_fc_free_fcp_iod()
775 fod->active = true; in nvmet_fc_free_fcp_iod()
778 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
785 queue_work(queue->work_q, &fod->defer_work); in nvmet_fc_free_fcp_iod()
806 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, in nvmet_fc_alloc_target_queue()
807 assoc->tgtport->fc_target_port.port_num, in nvmet_fc_alloc_target_queue()
808 assoc->a_id, qid); in nvmet_fc_alloc_target_queue()
809 if (!queue->work_q) in nvmet_fc_alloc_target_queue()
812 queue->qid = qid; in nvmet_fc_alloc_target_queue()
813 queue->sqsize = sqsize; in nvmet_fc_alloc_target_queue()
814 queue->assoc = assoc; in nvmet_fc_alloc_target_queue()
815 INIT_LIST_HEAD(&queue->fod_list); in nvmet_fc_alloc_target_queue()
816 INIT_LIST_HEAD(&queue->avail_defer_list); in nvmet_fc_alloc_target_queue()
817 INIT_LIST_HEAD(&queue->pending_cmd_list); in nvmet_fc_alloc_target_queue()
818 atomic_set(&queue->connected, 0); in nvmet_fc_alloc_target_queue()
819 atomic_set(&queue->sqtail, 0); in nvmet_fc_alloc_target_queue()
820 atomic_set(&queue->rsn, 1); in nvmet_fc_alloc_target_queue()
821 atomic_set(&queue->zrspcnt, 0); in nvmet_fc_alloc_target_queue()
822 spin_lock_init(&queue->qlock); in nvmet_fc_alloc_target_queue()
823 kref_init(&queue->ref); in nvmet_fc_alloc_target_queue()
825 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
827 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_fc_alloc_target_queue()
831 WARN_ON(assoc->queues[qid]); in nvmet_fc_alloc_target_queue()
832 spin_lock_irqsave(&assoc->tgtport->lock, flags); in nvmet_fc_alloc_target_queue()
833 assoc->queues[qid] = queue; in nvmet_fc_alloc_target_queue()
834 spin_unlock_irqrestore(&assoc->tgtport->lock, flags); in nvmet_fc_alloc_target_queue()
839 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
840 destroy_workqueue(queue->work_q); in nvmet_fc_alloc_target_queue()
856 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); in nvmet_fc_tgt_queue_free()
857 queue->assoc->queues[queue->qid] = NULL; in nvmet_fc_tgt_queue_free()
858 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); in nvmet_fc_tgt_queue_free()
860 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); in nvmet_fc_tgt_queue_free()
862 nvmet_fc_tgt_a_put(queue->assoc); in nvmet_fc_tgt_queue_free()
864 destroy_workqueue(queue->work_q); in nvmet_fc_tgt_queue_free()
872 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); in nvmet_fc_tgt_q_put()
878 return kref_get_unless_zero(&queue->ref); in nvmet_fc_tgt_q_get()
885 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; in nvmet_fc_delete_target_queue()
886 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_delete_target_queue()
892 disconnect = atomic_xchg(&queue->connected, 0); in nvmet_fc_delete_target_queue()
898 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
900 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
901 if (fod->active) { in nvmet_fc_delete_target_queue()
902 spin_lock(&fod->flock); in nvmet_fc_delete_target_queue()
903 fod->abort = true; in nvmet_fc_delete_target_queue()
909 if (fod->writedataactive) { in nvmet_fc_delete_target_queue()
910 fod->aborted = true; in nvmet_fc_delete_target_queue()
911 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
912 tgtport->ops->fcp_abort( in nvmet_fc_delete_target_queue()
913 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
915 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
920 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, in nvmet_fc_delete_target_queue()
922 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
927 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_delete_target_queue()
932 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
933 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
935 tgtport->ops->defer_rcv(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
936 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
938 tgtport->ops->fcp_abort(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
939 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
941 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
942 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
949 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
951 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
953 flush_workqueue(queue->work_q); in nvmet_fc_delete_target_queue()
955 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_fc_delete_target_queue()
973 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_find_target_queue()
974 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_queue()
975 if (association_id == assoc->association_id) { in nvmet_fc_find_target_queue()
976 queue = assoc->queues[qid]; in nvmet_fc_find_target_queue()
978 (!atomic_read(&queue->connected) || in nvmet_fc_find_target_queue()
981 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_find_target_queue()
985 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_find_target_queue()
994 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; in nvmet_fc_hostport_free()
997 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_hostport_free()
998 list_del(&hostport->host_list); in nvmet_fc_hostport_free()
999 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_hostport_free()
1000 if (tgtport->ops->host_release && hostport->invalid) in nvmet_fc_hostport_free()
1001 tgtport->ops->host_release(hostport->hosthandle); in nvmet_fc_hostport_free()
1009 kref_put(&hostport->ref, nvmet_fc_hostport_free); in nvmet_fc_hostport_put()
1015 return kref_get_unless_zero(&hostport->ref); in nvmet_fc_hostport_get()
1022 if (!hostport || !hostport->hosthandle) in nvmet_fc_free_hostport()
1040 return ERR_PTR(-EINVAL); in nvmet_fc_alloc_hostport()
1044 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1045 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_alloc_hostport()
1046 if (host->hosthandle == hosthandle && !host->invalid) { in nvmet_fc_alloc_hostport()
1053 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1054 /* no allocation - release reference */ in nvmet_fc_alloc_hostport()
1056 return (match) ? match : ERR_PTR(-ENOMEM); in nvmet_fc_alloc_hostport()
1059 newhost->tgtport = tgtport; in nvmet_fc_alloc_hostport()
1060 newhost->hosthandle = hosthandle; in nvmet_fc_alloc_hostport()
1061 INIT_LIST_HEAD(&newhost->host_list); in nvmet_fc_alloc_hostport()
1062 kref_init(&newhost->ref); in nvmet_fc_alloc_hostport()
1064 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1065 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_alloc_hostport()
1066 if (host->hosthandle == hosthandle && !host->invalid) { in nvmet_fc_alloc_hostport()
1076 /* releasing allocation - release reference */ in nvmet_fc_alloc_hostport()
1079 list_add_tail(&newhost->host_list, &tgtport->host_list); in nvmet_fc_alloc_hostport()
1080 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1108 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL); in nvmet_fc_alloc_target_assoc()
1115 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); in nvmet_fc_alloc_target_assoc()
1116 if (IS_ERR(assoc->hostport)) in nvmet_fc_alloc_target_assoc()
1119 assoc->tgtport = tgtport; in nvmet_fc_alloc_target_assoc()
1120 assoc->a_id = idx; in nvmet_fc_alloc_target_assoc()
1121 INIT_LIST_HEAD(&assoc->a_list); in nvmet_fc_alloc_target_assoc()
1122 kref_init(&assoc->ref); in nvmet_fc_alloc_target_assoc()
1123 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); in nvmet_fc_alloc_target_assoc()
1124 atomic_set(&assoc->terminating, 0); in nvmet_fc_alloc_target_assoc()
1127 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); in nvmet_fc_alloc_target_assoc()
1130 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1132 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { in nvmet_fc_alloc_target_assoc()
1133 if (ran == tmpassoc->association_id) { in nvmet_fc_alloc_target_assoc()
1139 assoc->association_id = ran; in nvmet_fc_alloc_target_assoc()
1140 list_add_tail(&assoc->a_list, &tgtport->assoc_list); in nvmet_fc_alloc_target_assoc()
1142 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1150 ida_simple_remove(&tgtport->assoc_cnt, idx); in nvmet_fc_alloc_target_assoc()
1161 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_target_assoc_free()
1168 nvmet_fc_free_hostport(assoc->hostport); in nvmet_fc_target_assoc_free()
1169 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1170 list_del(&assoc->a_list); in nvmet_fc_target_assoc_free()
1171 oldls = assoc->rcv_disconn; in nvmet_fc_target_assoc_free()
1172 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1176 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); in nvmet_fc_target_assoc_free()
1177 dev_info(tgtport->dev, in nvmet_fc_target_assoc_free()
1179 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_target_assoc_free()
1187 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); in nvmet_fc_tgt_a_put()
1193 return kref_get_unless_zero(&assoc->ref); in nvmet_fc_tgt_a_get()
1199 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_target_assoc()
1204 terminating = atomic_xchg(&assoc->terminating, 1); in nvmet_fc_delete_target_assoc()
1210 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1211 for (i = NVMET_NR_QUEUES; i >= 0; i--) { in nvmet_fc_delete_target_assoc()
1212 queue = assoc->queues[i]; in nvmet_fc_delete_target_assoc()
1216 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1219 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1222 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1224 dev_info(tgtport->dev, in nvmet_fc_delete_target_assoc()
1226 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_delete_target_assoc()
1239 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_find_target_assoc()
1240 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_assoc()
1241 if (association_id == assoc->association_id) { in nvmet_fc_find_target_assoc()
1248 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_find_target_assoc()
1260 pe->tgtport = tgtport; in nvmet_fc_portentry_bind()
1261 tgtport->pe = pe; in nvmet_fc_portentry_bind()
1263 pe->port = port; in nvmet_fc_portentry_bind()
1264 port->priv = pe; in nvmet_fc_portentry_bind()
1266 pe->node_name = tgtport->fc_target_port.node_name; in nvmet_fc_portentry_bind()
1267 pe->port_name = tgtport->fc_target_port.port_name; in nvmet_fc_portentry_bind()
1268 INIT_LIST_HEAD(&pe->pe_list); in nvmet_fc_portentry_bind()
1270 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); in nvmet_fc_portentry_bind()
1279 if (pe->tgtport) in nvmet_fc_portentry_unbind()
1280 pe->tgtport->pe = NULL; in nvmet_fc_portentry_unbind()
1281 list_del(&pe->pe_list); in nvmet_fc_portentry_unbind()
1288 * re-registration can resume operation.
1297 pe = tgtport->pe; in nvmet_fc_portentry_unbind_tgt()
1299 pe->tgtport = NULL; in nvmet_fc_portentry_unbind_tgt()
1300 tgtport->pe = NULL; in nvmet_fc_portentry_unbind_tgt()
1320 if (tgtport->fc_target_port.node_name == pe->node_name && in nvmet_fc_portentry_rebind_tgt()
1321 tgtport->fc_target_port.port_name == pe->port_name) { in nvmet_fc_portentry_rebind_tgt()
1322 WARN_ON(pe->tgtport); in nvmet_fc_portentry_rebind_tgt()
1323 tgtport->pe = pe; in nvmet_fc_portentry_rebind_tgt()
1324 pe->tgtport = tgtport; in nvmet_fc_portentry_rebind_tgt()
1332 * nvmet_fc_register_targetport - transport entry point called by an
1346 * (ex: -ENXIO) upon failure.
1358 if (!template->xmt_ls_rsp || !template->fcp_op || in nvmet_fc_register_targetport()
1359 !template->fcp_abort || in nvmet_fc_register_targetport()
1360 !template->fcp_req_release || !template->targetport_delete || in nvmet_fc_register_targetport()
1361 !template->max_hw_queues || !template->max_sgl_segments || in nvmet_fc_register_targetport()
1362 !template->max_dif_sgl_segments || !template->dma_boundary) { in nvmet_fc_register_targetport()
1363 ret = -EINVAL; in nvmet_fc_register_targetport()
1367 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), in nvmet_fc_register_targetport()
1370 ret = -ENOMEM; in nvmet_fc_register_targetport()
1376 ret = -ENOSPC; in nvmet_fc_register_targetport()
1381 ret = -ENODEV; in nvmet_fc_register_targetport()
1385 newrec->fc_target_port.node_name = pinfo->node_name; in nvmet_fc_register_targetport()
1386 newrec->fc_target_port.port_name = pinfo->port_name; in nvmet_fc_register_targetport()
1387 if (template->target_priv_sz) in nvmet_fc_register_targetport()
1388 newrec->fc_target_port.private = &newrec[1]; in nvmet_fc_register_targetport()
1390 newrec->fc_target_port.private = NULL; in nvmet_fc_register_targetport()
1391 newrec->fc_target_port.port_id = pinfo->port_id; in nvmet_fc_register_targetport()
1392 newrec->fc_target_port.port_num = idx; in nvmet_fc_register_targetport()
1393 INIT_LIST_HEAD(&newrec->tgt_list); in nvmet_fc_register_targetport()
1394 newrec->dev = dev; in nvmet_fc_register_targetport()
1395 newrec->ops = template; in nvmet_fc_register_targetport()
1396 spin_lock_init(&newrec->lock); in nvmet_fc_register_targetport()
1397 INIT_LIST_HEAD(&newrec->ls_rcv_list); in nvmet_fc_register_targetport()
1398 INIT_LIST_HEAD(&newrec->ls_req_list); in nvmet_fc_register_targetport()
1399 INIT_LIST_HEAD(&newrec->ls_busylist); in nvmet_fc_register_targetport()
1400 INIT_LIST_HEAD(&newrec->assoc_list); in nvmet_fc_register_targetport()
1401 INIT_LIST_HEAD(&newrec->host_list); in nvmet_fc_register_targetport()
1402 kref_init(&newrec->ref); in nvmet_fc_register_targetport()
1403 ida_init(&newrec->assoc_cnt); in nvmet_fc_register_targetport()
1404 newrec->max_sg_cnt = template->max_sgl_segments; in nvmet_fc_register_targetport()
1408 ret = -ENOMEM; in nvmet_fc_register_targetport()
1415 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); in nvmet_fc_register_targetport()
1418 *portptr = &newrec->fc_target_port; in nvmet_fc_register_targetport()
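As a rough illustration of the registration entry point documented above, an LLDD would fill in a port_info and a target template and register them along these lines. All lldd_* names and the struct lldd_port/lldd_tgtport_priv types are hypothetical; this is a sketch, not a real driver:

#include <linux/nvme-fc-driver.h>

/* Hypothetical LLDD ops/limits; these are the fields the transport
 * insists on at registration time (see the checks above).
 */
static struct nvmet_fc_target_template lldd_nvmet_template = {
	.targetport_delete	= lldd_targetport_delete,
	.xmt_ls_rsp		= lldd_xmt_ls_rsp,
	.fcp_op			= lldd_fcp_op,
	.fcp_abort		= lldd_fcp_abort,
	.fcp_req_release	= lldd_fcp_req_release,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	/* LLDD can send the response with the final READDATA sequence */
	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP,
	.target_priv_sz		= sizeof(struct lldd_tgtport_priv),
};

static int lldd_register_nvmet_port(struct lldd_port *lport)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= lport->wwnn,
		.port_name	= lport->wwpn,
		.port_id	= lport->d_id,
	};

	/* On success the transport returns a target_port pointer that the
	 * LLDD passes to every later nvmet_fc_* call for this port.
	 */
	return nvmet_fc_register_targetport(&pinfo, &lldd_nvmet_template,
					    lport->dev, &lport->targetport);
}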
1439 struct device *dev = tgtport->dev; in nvmet_fc_free_tgtport()
1443 list_del(&tgtport->tgt_list); in nvmet_fc_free_tgtport()
1449 tgtport->ops->targetport_delete(&tgtport->fc_target_port); in nvmet_fc_free_tgtport()
1452 tgtport->fc_target_port.port_num); in nvmet_fc_free_tgtport()
1454 ida_destroy(&tgtport->assoc_cnt); in nvmet_fc_free_tgtport()
1464 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); in nvmet_fc_tgtport_put()
1470 return kref_get_unless_zero(&tgtport->ref); in nvmet_fc_tgtport_get()
1479 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_free_assocs()
1481 &tgtport->assoc_list, a_list) { in __nvmet_fc_free_assocs()
1484 if (!schedule_work(&assoc->del_work)) in __nvmet_fc_free_assocs()
1485 /* already deleting - release local reference */ in __nvmet_fc_free_assocs()
1488 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_free_assocs()
1492 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1495 * The nvmet-fc layer ensures that any references to the hosthandle
1505 * retries by the nvmet-fc transport. The nvmet-fc transport may
1507 * NVME associations. The nvmet-fc transport will call the
1508 * ops->host_release() callback to notify the LLDD that all references
1529 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1531 &tgtport->assoc_list, a_list) { in nvmet_fc_invalidate_host()
1532 if (!assoc->hostport || in nvmet_fc_invalidate_host()
1533 assoc->hostport->hosthandle != hosthandle) in nvmet_fc_invalidate_host()
1537 assoc->hostport->invalid = 1; in nvmet_fc_invalidate_host()
1539 if (!schedule_work(&assoc->del_work)) in nvmet_fc_invalidate_host()
1540 /* already deleting - release local reference */ in nvmet_fc_invalidate_host()
1543 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1545 /* if there's nothing to wait for - call the callback */ in nvmet_fc_invalidate_host()
1546 if (noassoc && tgtport->ops->host_release) in nvmet_fc_invalidate_host()
1547 tgtport->ops->host_release(hosthandle); in nvmet_fc_invalidate_host()
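As an example of the contract described above, an LLDD that loses connectivity to an NVMe-capable remote port could invalidate its handle roughly as follows (lldd_* names are hypothetical):

/* Hypothetical LLDD callback for a remote port that has gone away. */
static void lldd_remoteport_gone(struct lldd_rport *rport)
{
	/*
	 * Tell nvmet-fc the handle may no longer be used. Associations
	 * tied to this host are scheduled for deletion, and once the last
	 * reference drops the transport calls ops->host_release() (if
	 * provided) so the LLDD can free its rport context.
	 */
	nvmet_fc_invalidate_host(rport->lport->targetport, rport);
}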
1571 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_ctrl()
1572 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_delete_ctrl()
1573 queue = assoc->queues[0]; in nvmet_fc_delete_ctrl()
1574 if (queue && queue->nvme_sq.ctrl == ctrl) { in nvmet_fc_delete_ctrl()
1580 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_ctrl()
1585 if (!schedule_work(&assoc->del_work)) in nvmet_fc_delete_ctrl()
1586 /* already deleting - release local reference */ in nvmet_fc_delete_ctrl()
1597 * nvmet_fc_unregister_targetport - transport entry point called by an
1605 * (ex: -ENXIO) upon failure.
1631 /* ********************** FC-NVME LS RCV Handling ************************* */
1638 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; in nvmet_fc_ls_create_association()
1639 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; in nvmet_fc_ls_create_association()
1646 * FC-NVME spec changes. There are initiators sending different in nvmet_fc_ls_create_association()
1653 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) in nvmet_fc_ls_create_association()
1655 else if (be32_to_cpu(rqst->desc_list_len) < in nvmet_fc_ls_create_association()
1658 else if (rqst->assoc_cmd.desc_tag != in nvmet_fc_ls_create_association()
1661 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < in nvmet_fc_ls_create_association()
1664 else if (!rqst->assoc_cmd.ersp_ratio || in nvmet_fc_ls_create_association()
1665 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= in nvmet_fc_ls_create_association()
1666 be16_to_cpu(rqst->assoc_cmd.sqsize))) in nvmet_fc_ls_create_association()
1671 iod->assoc = nvmet_fc_alloc_target_assoc( in nvmet_fc_ls_create_association()
1672 tgtport, iod->hosthandle); in nvmet_fc_ls_create_association()
1673 if (!iod->assoc) in nvmet_fc_ls_create_association()
1676 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, in nvmet_fc_ls_create_association()
1677 be16_to_cpu(rqst->assoc_cmd.sqsize)); in nvmet_fc_ls_create_association()
1684 dev_err(tgtport->dev, in nvmet_fc_ls_create_association()
1687 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_association()
1688 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_association()
1694 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); in nvmet_fc_ls_create_association()
1695 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_association()
1696 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_association()
1698 dev_info(tgtport->dev, in nvmet_fc_ls_create_association()
1700 tgtport->fc_target_port.port_num, iod->assoc->a_id); in nvmet_fc_ls_create_association()
1704 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_association()
1710 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); in nvmet_fc_ls_create_association()
1711 acc->associd.desc_len = in nvmet_fc_ls_create_association()
1714 acc->associd.association_id = in nvmet_fc_ls_create_association()
1715 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); in nvmet_fc_ls_create_association()
1716 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_association()
1717 acc->connectid.desc_len = in nvmet_fc_ls_create_association()
1720 acc->connectid.connection_id = acc->associd.association_id; in nvmet_fc_ls_create_association()
1727 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; in nvmet_fc_ls_create_connection()
1728 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; in nvmet_fc_ls_create_connection()
1734 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) in nvmet_fc_ls_create_connection()
1736 else if (rqst->desc_list_len != in nvmet_fc_ls_create_connection()
1740 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) in nvmet_fc_ls_create_connection()
1742 else if (rqst->associd.desc_len != in nvmet_fc_ls_create_connection()
1746 else if (rqst->connect_cmd.desc_tag != in nvmet_fc_ls_create_connection()
1749 else if (rqst->connect_cmd.desc_len != in nvmet_fc_ls_create_connection()
1753 else if (!rqst->connect_cmd.ersp_ratio || in nvmet_fc_ls_create_connection()
1754 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= in nvmet_fc_ls_create_connection()
1755 be16_to_cpu(rqst->connect_cmd.sqsize))) in nvmet_fc_ls_create_connection()
1760 iod->assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_create_connection()
1761 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_create_connection()
1762 if (!iod->assoc) in nvmet_fc_ls_create_connection()
1765 queue = nvmet_fc_alloc_target_queue(iod->assoc, in nvmet_fc_ls_create_connection()
1766 be16_to_cpu(rqst->connect_cmd.qid), in nvmet_fc_ls_create_connection()
1767 be16_to_cpu(rqst->connect_cmd.sqsize)); in nvmet_fc_ls_create_connection()
1772 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_connection()
1777 dev_err(tgtport->dev, in nvmet_fc_ls_create_connection()
1780 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_connection()
1781 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_connection()
1789 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); in nvmet_fc_ls_create_connection()
1790 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_connection()
1791 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_connection()
1795 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_connection()
1800 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_connection()
1801 acc->connectid.desc_len = in nvmet_fc_ls_create_connection()
1804 acc->connectid.connection_id = in nvmet_fc_ls_create_connection()
1805 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, in nvmet_fc_ls_create_connection()
1806 be16_to_cpu(rqst->connect_cmd.qid))); in nvmet_fc_ls_create_connection()
1818 &iod->rqstbuf->rq_dis_assoc; in nvmet_fc_ls_disconnect()
1820 &iod->rspbuf->rsp_dis_assoc; in nvmet_fc_ls_disconnect()
1828 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); in nvmet_fc_ls_disconnect()
1830 /* match an active association - takes an assoc ref if !NULL */ in nvmet_fc_ls_disconnect()
1832 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_disconnect()
1833 iod->assoc = assoc; in nvmet_fc_ls_disconnect()
1839 dev_err(tgtport->dev, in nvmet_fc_ls_disconnect()
1842 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_disconnect()
1843 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1853 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_disconnect()
1872 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1873 oldls = assoc->rcv_disconn; in nvmet_fc_ls_disconnect()
1874 assoc->rcv_disconn = iod; in nvmet_fc_ls_disconnect()
1875 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1880 dev_info(tgtport->dev, in nvmet_fc_ls_disconnect()
1883 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_ls_disconnect()
1885 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, in nvmet_fc_ls_disconnect()
1886 sizeof(*iod->rspbuf), in nvmet_fc_ls_disconnect()
1888 rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1908 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; in nvmet_fc_xmt_ls_rsp_done()
1909 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_xmt_ls_rsp_done()
1911 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp_done()
1912 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp_done()
1923 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp()
1924 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp()
1926 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1928 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1932 * Actual processing routine for received FC-NVME LS Requests from the LLD
1938 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; in nvmet_fc_handle_ls_rqst()
1941 iod->lsrsp->nvme_fc_private = iod; in nvmet_fc_handle_ls_rqst()
1942 iod->lsrsp->rspbuf = iod->rspbuf; in nvmet_fc_handle_ls_rqst()
1943 iod->lsrsp->rspdma = iod->rspdma; in nvmet_fc_handle_ls_rqst()
1944 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; in nvmet_fc_handle_ls_rqst()
1946 iod->lsrsp->rsplen = 0; in nvmet_fc_handle_ls_rqst()
1948 iod->assoc = NULL; in nvmet_fc_handle_ls_rqst()
1955 switch (w0->ls_cmd) { in nvmet_fc_handle_ls_rqst()
1969 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, in nvmet_fc_handle_ls_rqst()
1970 sizeof(*iod->rspbuf), w0->ls_cmd, in nvmet_fc_handle_ls_rqst()
1979 * Actual processing routine for received FC-NVME LS Requests from the LLD
1986 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_handle_ls_rqst_work()
1993 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1996 * The nvmet-fc layer will copy payload to an internal structure for
2020 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2022 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2023 nvmefc_ls_names[w0->ls_cmd] : "", in nvmet_fc_rcv_ls_req()
2025 return -E2BIG; in nvmet_fc_rcv_ls_req()
2029 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2031 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2032 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2033 return -ESHUTDOWN; in nvmet_fc_rcv_ls_req()
2038 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2040 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2041 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2043 return -ENOENT; in nvmet_fc_rcv_ls_req()
2046 iod->lsrsp = lsrsp; in nvmet_fc_rcv_ls_req()
2047 iod->fcpreq = NULL; in nvmet_fc_rcv_ls_req()
2048 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); in nvmet_fc_rcv_ls_req()
2049 iod->rqstdatalen = lsreqbuf_len; in nvmet_fc_rcv_ls_req()
2050 iod->hosthandle = hosthandle; in nvmet_fc_rcv_ls_req()
2052 schedule_work(&iod->work); in nvmet_fc_rcv_ls_req()
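A minimal sketch of the LLDD side of this entry point, i.e. handing a received LS exchange to the transport (lldd_* names and the lldd_exchange type are hypothetical):

/* Hypothetical LLDD path for a received FC-NVME LS request. */
static void lldd_handle_nvme_ls(struct lldd_rport *rport,
				struct lldd_exchange *xchg,
				void *lsbuf, u32 lslen)
{
	int ret;

	ret = nvmet_fc_rcv_ls_req(rport->lport->targetport, rport,
				  &xchg->ls_rsp, lsbuf, lslen);
	if (ret)
		/* -E2BIG, -ESHUTDOWN or -ENOENT: the transport did not
		 * take the LS; reject or drop the exchange in the LLDD.
		 */
		lldd_reject_ls(xchg, ret);
}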
2071 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); in nvmet_fc_alloc_tgt_pgs()
2075 fod->data_sg = sg; in nvmet_fc_alloc_tgt_pgs()
2076 fod->data_sg_cnt = nent; in nvmet_fc_alloc_tgt_pgs()
2077 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2078 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_alloc_tgt_pgs()
2081 fod->next_sg = fod->data_sg; in nvmet_fc_alloc_tgt_pgs()
2092 if (!fod->data_sg || !fod->data_sg_cnt) in nvmet_fc_free_tgt_pgs()
2095 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2096 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_free_tgt_pgs()
2098 sgl_free(fod->data_sg); in nvmet_fc_free_tgt_pgs()
2099 fod->data_sg = NULL; in nvmet_fc_free_tgt_pgs()
2100 fod->data_sg_cnt = 0; in nvmet_fc_free_tgt_pgs()
2110 sqtail = atomic_read(&q->sqtail) % q->sqsize; in queue_90percent_full()
2112 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); in queue_90percent_full()
2113 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); in queue_90percent_full()
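As a concrete example of the check above: with sqsize = 128, queue_90percent_full() reports true once used * 10 >= 127 * 9, i.e. used >= 115 outstanding entries, which is roughly 90% of the submission queue.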
2124 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; in nvmet_fc_prep_fcp_rsp()
2125 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in nvmet_fc_prep_fcp_rsp()
2126 struct nvme_completion *cqe = &ersp->cqe; in nvmet_fc_prep_fcp_rsp() local
2127 u32 *cqewd = (u32 *)cqe; in nvmet_fc_prep_fcp_rsp()
2131 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) in nvmet_fc_prep_fcp_rsp()
2132 xfr_length = fod->req.transfer_len; in nvmet_fc_prep_fcp_rsp()
2134 xfr_length = fod->offset; in nvmet_fc_prep_fcp_rsp()
2138 * Note: to send a 0's response, the NVME-FC host transport will in nvmet_fc_prep_fcp_rsp()
2139 * recreate the CQE. The host transport knows: sq id, SQHD (last in nvmet_fc_prep_fcp_rsp()
2141 * zero-filled CQE with those known fields filled in. Transport in nvmet_fc_prep_fcp_rsp()
2142 * must send an ersp for any condition where the cqe won't match in nvmet_fc_prep_fcp_rsp()
2145 * Here are the FC-NVME mandated cases where we must send an ersp: in nvmet_fc_prep_fcp_rsp()
2147 * force fabric commands to send ersp's (not in FC-NVME but good in nvmet_fc_prep_fcp_rsp()
2149 * normal cmds: any time status is non-zero, or status is zero in nvmet_fc_prep_fcp_rsp()
2150 * but words 0 or 1 are non-zero. in nvmet_fc_prep_fcp_rsp()
2155 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); in nvmet_fc_prep_fcp_rsp()
2156 if (!(rspcnt % fod->queue->ersp_ratio) || in nvmet_fc_prep_fcp_rsp()
2158 xfr_length != fod->req.transfer_len || in nvmet_fc_prep_fcp_rsp()
2159 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || in nvmet_fc_prep_fcp_rsp()
2160 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || in nvmet_fc_prep_fcp_rsp()
2161 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) in nvmet_fc_prep_fcp_rsp()
2164 /* re-set the fields */ in nvmet_fc_prep_fcp_rsp()
2165 fod->fcpreq->rspaddr = ersp; in nvmet_fc_prep_fcp_rsp()
2166 fod->fcpreq->rspdma = fod->rspdma; in nvmet_fc_prep_fcp_rsp()
2170 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; in nvmet_fc_prep_fcp_rsp()
2172 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); in nvmet_fc_prep_fcp_rsp()
2173 rsn = atomic_inc_return(&fod->queue->rsn); in nvmet_fc_prep_fcp_rsp()
2174 ersp->rsn = cpu_to_be32(rsn); in nvmet_fc_prep_fcp_rsp()
2175 ersp->xfrd_len = cpu_to_be32(xfr_length); in nvmet_fc_prep_fcp_rsp()
2176 fod->fcpreq->rsplen = sizeof(*ersp); in nvmet_fc_prep_fcp_rsp()
2179 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2180 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_rsp()
2189 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_abort_op()
2198 /* no need to take lock - lock was taken earlier to get here */ in nvmet_fc_abort_op()
2199 if (!fod->aborted) in nvmet_fc_abort_op()
2200 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); in nvmet_fc_abort_op()
2202 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_abort_op()
2211 fod->fcpreq->op = NVMET_FCOP_RSP; in nvmet_fc_xmt_fcp_rsp()
2212 fod->fcpreq->timeout = 0; in nvmet_fc_xmt_fcp_rsp()
2216 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2225 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_transfer_fcp_data()
2226 struct scatterlist *sg = fod->next_sg; in nvmet_fc_transfer_fcp_data()
2228 u32 remaininglen = fod->req.transfer_len - fod->offset; in nvmet_fc_transfer_fcp_data()
2232 fcpreq->op = op; in nvmet_fc_transfer_fcp_data()
2233 fcpreq->offset = fod->offset; in nvmet_fc_transfer_fcp_data()
2234 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; in nvmet_fc_transfer_fcp_data()
2245 fcpreq->sg = sg; in nvmet_fc_transfer_fcp_data()
2246 fcpreq->sg_cnt = 0; in nvmet_fc_transfer_fcp_data()
2248 fcpreq->sg_cnt < tgtport->max_sg_cnt && in nvmet_fc_transfer_fcp_data()
2250 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2254 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { in nvmet_fc_transfer_fcp_data()
2255 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2260 fod->next_sg = sg; in nvmet_fc_transfer_fcp_data()
2262 fod->next_sg = NULL; in nvmet_fc_transfer_fcp_data()
2264 fcpreq->transfer_length = tlen; in nvmet_fc_transfer_fcp_data()
2265 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2266 fcpreq->fcp_error = 0; in nvmet_fc_transfer_fcp_data()
2267 fcpreq->rsplen = 0; in nvmet_fc_transfer_fcp_data()
2270 * If the last READDATA request: check if LLDD supports in nvmet_fc_transfer_fcp_data()
2274 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && in nvmet_fc_transfer_fcp_data()
2275 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { in nvmet_fc_transfer_fcp_data()
2276 fcpreq->op = NVMET_FCOP_READDATA_RSP; in nvmet_fc_transfer_fcp_data()
2280 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2287 fod->abort = true; in nvmet_fc_transfer_fcp_data()
2290 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2291 fod->writedataactive = false; in nvmet_fc_transfer_fcp_data()
2292 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2293 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_transfer_fcp_data()
2295 fcpreq->fcp_error = ret; in nvmet_fc_transfer_fcp_data()
2296 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2297 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2305 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in __nvmet_fc_fod_op_abort()
2306 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort()
2310 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { in __nvmet_fc_fod_op_abort()
2311 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in __nvmet_fc_fod_op_abort()
2328 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_fod_op_done()
2329 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done()
2333 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2334 abort = fod->abort; in nvmet_fc_fod_op_done()
2335 fod->writedataactive = false; in nvmet_fc_fod_op_done()
2336 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2338 switch (fcpreq->op) { in nvmet_fc_fod_op_done()
2343 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2344 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2345 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2346 fod->abort = true; in nvmet_fc_fod_op_done()
2347 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2349 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_fod_op_done()
2353 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2354 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2355 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2356 fod->writedataactive = true; in nvmet_fc_fod_op_done()
2357 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2366 fod->req.execute(&fod->req); in nvmet_fc_fod_op_done()
2373 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2374 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2381 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { in nvmet_fc_fod_op_done()
2384 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2388 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2389 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2408 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2419 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_xmt_fcp_op_done()
2431 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in __nvmet_fc_fcp_nvme_cmd_done()
2432 struct nvme_completion *cqe = &fod->rspiubuf.cqe; in __nvmet_fc_fcp_nvme_cmd_done() local
2436 spin_lock_irqsave(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2437 abort = fod->abort; in __nvmet_fc_fcp_nvme_cmd_done()
2438 spin_unlock_irqrestore(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2440 /* if we have a CQE, snoop the last sq_head value */ in __nvmet_fc_fcp_nvme_cmd_done()
2442 fod->queue->sqhd = cqe->sq_head; in __nvmet_fc_fcp_nvme_cmd_done()
2451 /* fudge up a failed CQE status for our transport error */ in __nvmet_fc_fcp_nvme_cmd_done()
2452 memset(cqe, 0, sizeof(*cqe)); in __nvmet_fc_fcp_nvme_cmd_done()
2453 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ in __nvmet_fc_fcp_nvme_cmd_done()
2454 cqe->sq_id = cpu_to_le16(fod->queue->qid); in __nvmet_fc_fcp_nvme_cmd_done()
2455 cqe->command_id = sqe->command_id; in __nvmet_fc_fcp_nvme_cmd_done()
2456 cqe->status = cpu_to_le16(status); in __nvmet_fc_fcp_nvme_cmd_done()
2460 * try to push the data even if the SQE status is non-zero. in __nvmet_fc_fcp_nvme_cmd_done()
2464 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { in __nvmet_fc_fcp_nvme_cmd_done()
2471 /* writes & no data - fall thru */ in __nvmet_fc_fcp_nvme_cmd_done()
2485 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done()
2492 * Actual processing routine for received FC-NVME I/O Requests from the LLD
2498 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; in nvmet_fc_handle_fcp_rqst()
2499 u32 xfrlen = be32_to_cpu(cmdiu->data_len); in nvmet_fc_handle_fcp_rqst()
2506 if (!tgtport->pe) in nvmet_fc_handle_fcp_rqst()
2518 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; in nvmet_fc_handle_fcp_rqst()
2520 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { in nvmet_fc_handle_fcp_rqst()
2521 fod->io_dir = NVMET_FCP_WRITE; in nvmet_fc_handle_fcp_rqst()
2522 if (!nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2524 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { in nvmet_fc_handle_fcp_rqst()
2525 fod->io_dir = NVMET_FCP_READ; in nvmet_fc_handle_fcp_rqst()
2526 if (nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2529 fod->io_dir = NVMET_FCP_NODATA; in nvmet_fc_handle_fcp_rqst()
2534 fod->req.cmd = &fod->cmdiubuf.sqe; in nvmet_fc_handle_fcp_rqst()
2535 fod->req.cqe = &fod->rspiubuf.cqe; in nvmet_fc_handle_fcp_rqst()
2536 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2539 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); in nvmet_fc_handle_fcp_rqst()
2541 fod->data_sg = NULL; in nvmet_fc_handle_fcp_rqst()
2542 fod->data_sg_cnt = 0; in nvmet_fc_handle_fcp_rqst()
2544 ret = nvmet_req_init(&fod->req, in nvmet_fc_handle_fcp_rqst()
2545 &fod->queue->nvme_cq, in nvmet_fc_handle_fcp_rqst()
2546 &fod->queue->nvme_sq, in nvmet_fc_handle_fcp_rqst()
2554 fod->req.transfer_len = xfrlen; in nvmet_fc_handle_fcp_rqst()
2557 atomic_inc(&fod->queue->sqtail); in nvmet_fc_handle_fcp_rqst()
2559 if (fod->req.transfer_len) { in nvmet_fc_handle_fcp_rqst()
2562 nvmet_req_complete(&fod->req, ret); in nvmet_fc_handle_fcp_rqst()
2566 fod->req.sg = fod->data_sg; in nvmet_fc_handle_fcp_rqst()
2567 fod->req.sg_cnt = fod->data_sg_cnt; in nvmet_fc_handle_fcp_rqst()
2568 fod->offset = 0; in nvmet_fc_handle_fcp_rqst()
2570 if (fod->io_dir == NVMET_FCP_WRITE) { in nvmet_fc_handle_fcp_rqst()
2582 fod->req.execute(&fod->req); in nvmet_fc_handle_fcp_rqst()
2590 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2593 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2606 * asynchronously received - it's possible for a command to be received
2613 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2619 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2625 * transport will return a non-zero status indicating the error.
2626 * In all cases other than -EOVERFLOW, the transport has not accepted the
2650 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || in nvmet_fc_rcv_fcp_req()
2651 (cmdiu->fc_id != NVME_CMD_FC_ID) || in nvmet_fc_rcv_fcp_req()
2652 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) in nvmet_fc_rcv_fcp_req()
2653 return -EIO; in nvmet_fc_rcv_fcp_req()
2656 be64_to_cpu(cmdiu->connection_id)); in nvmet_fc_rcv_fcp_req()
2658 return -ENOTCONN; in nvmet_fc_rcv_fcp_req()
2667 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2671 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2673 fcpreq->nvmet_fc_private = fod; in nvmet_fc_rcv_fcp_req()
2674 fod->fcpreq = fcpreq; in nvmet_fc_rcv_fcp_req()
2676 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); in nvmet_fc_rcv_fcp_req()
2683 if (!tgtport->ops->defer_rcv) { in nvmet_fc_rcv_fcp_req()
2684 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2687 return -ENOENT; in nvmet_fc_rcv_fcp_req()
2690 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, in nvmet_fc_rcv_fcp_req()
2693 /* Just re-use one that was previously allocated */ in nvmet_fc_rcv_fcp_req()
2694 list_del(&deferfcp->req_list); in nvmet_fc_rcv_fcp_req()
2696 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2703 return -ENOMEM; in nvmet_fc_rcv_fcp_req()
2705 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2709 fcpreq->rspaddr = cmdiubuf; in nvmet_fc_rcv_fcp_req()
2710 fcpreq->rsplen = cmdiubuf_len; in nvmet_fc_rcv_fcp_req()
2711 deferfcp->fcp_req = fcpreq; in nvmet_fc_rcv_fcp_req()
2714 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); in nvmet_fc_rcv_fcp_req()
2718 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2720 return -EOVERFLOW; in nvmet_fc_rcv_fcp_req()
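Pulling the contract above together, an LLDD receive path might look roughly like this; only the -EOVERFLOW case needs special handling (lldd_* names are hypothetical):

/* Hypothetical LLDD fast path for a received FCP CMD IU. */
static void lldd_handle_fcp_cmd(struct lldd_port *lport,
				struct nvmefc_tgt_fcp_req *fcpreq,
				void *cmdiu, u32 cmdiu_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(lport->targetport, fcpreq,
				   cmdiu, cmdiu_len);
	if (!ret)
		return;		/* transport owns the job from here on */

	if (ret == -EOVERFLOW) {
		/*
		 * Command was queued by the transport: the CMD IU buffer
		 * and the exchange must stay untouched until the transport
		 * calls ops->defer_rcv() for this fcpreq.
		 */
		return;
	}

	/* Any other error: the command was not accepted by the transport,
	 * so the LLDD must terminate the exchange itself.
	 */
	lldd_terminate_fcp_exchange(lport, fcpreq);
}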
2725 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2731 * (template_ops->fcp_req_release() has not been called).
2751 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_rcv_fcp_abort()
2755 if (!fod || fod->fcpreq != fcpreq) in nvmet_fc_rcv_fcp_abort()
2759 queue = fod->queue; in nvmet_fc_rcv_fcp_abort()
2761 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
2762 if (fod->active) { in nvmet_fc_rcv_fcp_abort()
2768 spin_lock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2769 fod->abort = true; in nvmet_fc_rcv_fcp_abort()
2770 fod->aborted = true; in nvmet_fc_rcv_fcp_abort()
2771 spin_unlock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2773 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
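And the matching abort notification, for when the initiator aborts an exchange whose I/O the transport still owns (again with hypothetical lldd_* names):

/* Hypothetical LLDD notification that the initiator aborted the I/O. */
static void lldd_fcp_exchange_aborted(struct lldd_port *lport,
				      struct nvmefc_tgt_fcp_req *fcpreq)
{
	/*
	 * Valid while the op is still outstanding (fcp_req_release() has
	 * not been called); nvmet-fc marks the job aborted and completes
	 * it through its normal paths.
	 */
	nvmet_fc_rcv_fcp_abort(lport->targetport, fcpreq);
}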
2789 return -EINVAL; in __nvme_fc_parse_u64()
2804 substring_t wwn = { name, &name[sizeof(name)-1] }; in nvme_fc_parse_traddr()
2809 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && in nvme_fc_parse_traddr()
2811 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { in nvme_fc_parse_traddr()
2816 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && in nvme_fc_parse_traddr()
2818 "pn-", NVME_FC_TRADDR_NNLEN))) { in nvme_fc_parse_traddr()
2829 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) in nvme_fc_parse_traddr()
2833 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) in nvme_fc_parse_traddr()
2840 return -EINVAL; in nvme_fc_parse_traddr()
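For reference, the two traddr spellings accepted above look like "nn-0x10000090fa945678:pn-0x20000090fa945678" (with 0x prefixes) and "nn-10000090fa945678:pn-20000090fa945678" (without); the WWNN/WWPN values here are made up. A tiny sketch of exercising the parser:

/* Sketch only: parse a sample traddr into its node/port names. */
static int example_parse_traddr(void)
{
	struct nvmet_fc_traddr traddr = { };
	static const char buf[] =
		"nn-0x10000090fa945678:pn-0x20000090fa945678";

	return nvme_fc_parse_traddr(&traddr, buf, sizeof(buf));
}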
2853 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || in nvmet_fc_add_port()
2854 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) in nvmet_fc_add_port()
2855 return -EINVAL; in nvmet_fc_add_port()
2859 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, in nvmet_fc_add_port()
2860 sizeof(port->disc_addr.traddr)); in nvmet_fc_add_port()
2866 return -ENOMEM; in nvmet_fc_add_port()
2868 ret = -ENXIO; in nvmet_fc_add_port()
2871 if ((tgtport->fc_target_port.node_name == traddr.nn) && in nvmet_fc_add_port()
2872 (tgtport->fc_target_port.port_name == traddr.pn)) { in nvmet_fc_add_port()
2874 if (!tgtport->pe) { in nvmet_fc_add_port()
2878 ret = -EALREADY; in nvmet_fc_add_port()
2893 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_remove_port()
2903 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_discovery_chg()
2904 struct nvmet_fc_tgtport *tgtport = pe->tgtport; in nvmet_fc_discovery_chg()
2906 if (tgtport && tgtport->ops->discovery_event) in nvmet_fc_discovery_chg()
2907 tgtport->ops->discovery_event(&tgtport->fc_target_port); in nvmet_fc_discovery_chg()
2928 /* sanity check - all lports should be removed */ in nvmet_fc_exit_module()