Lines matching refs: lport
133 struct list_head endp_list; /* for lport->endp_list */
139 struct nvme_fc_lport *lport;
156 struct nvme_fc_lport *lport;
242 struct nvme_fc_lport *lport =
246 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
247 WARN_ON(!list_empty(&lport->endp_list));
251 list_del(&lport->port_list);
254 ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
255 ida_destroy(&lport->endp_cnt);
257 put_device(lport->dev);
259 kfree(lport);
263 nvme_fc_lport_put(struct nvme_fc_lport *lport)
265 kref_put(&lport->ref, nvme_fc_free_lport);
269 nvme_fc_lport_get(struct nvme_fc_lport *lport)
271 return kref_get_unless_zero(&lport->ref);
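Taken together, lines 242-271 above are the standard kref lifecycle for the lport object (the symbols match the Linux NVMe-FC host transport, drivers/nvme/host/fc.c): the release callback runs only on the final put, and the get helper uses kref_get_unless_zero() so a caller can never resurrect an lport whose count already reached zero. A minimal sketch of the pattern reconstructed from the matched lines; the real code also holds nvme_fc_lock around the list_del(), which is elided here:

        static void
        nvme_fc_free_lport(struct kref *ref)
        {
                struct nvme_fc_lport *lport =
                        container_of(ref, struct nvme_fc_lport, ref);

                WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
                WARN_ON(!list_empty(&lport->endp_list));

                /* remove from transport list, return the port number */
                list_del(&lport->port_list);
                ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
                ida_destroy(&lport->endp_cnt);

                put_device(lport->dev);
                kfree(lport);
        }

        static void
        nvme_fc_lport_put(struct nvme_fc_lport *lport)
        {
                kref_put(&lport->ref, nvme_fc_free_lport);
        }

        static int
        nvme_fc_lport_get(struct nvme_fc_lport *lport)
        {
                /* returns 0 if the refcount had already dropped to zero */
                return kref_get_unless_zero(&lport->ref);
        }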
280 struct nvme_fc_lport *lport;
285 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
286 if (lport->localport.node_name != pinfo->node_name ||
287 lport->localport.port_name != pinfo->port_name)
290 if (lport->dev != dev) {
291 lport = ERR_PTR(-EXDEV);
295 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
296 lport = ERR_PTR(-EEXIST);
300 if (!nvme_fc_lport_get(lport)) {
303 * act as if lport already deleted
305 lport = NULL;
309 /* resume the lport */
311 lport->ops = ops;
312 lport->localport.port_role = pinfo->port_role;
313 lport->localport.port_id = pinfo->port_id;
314 lport->localport.port_state = FC_OBJSTATE_ONLINE;
318 return lport;
321 lport = NULL;
326 return lport;
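Lines 280-326 walk nvme_fc_lport_list looking for a previously deleted lport whose node/port names match a re-registering driver, so the old object can be resumed rather than reallocated. Three outcomes are visible above: same names on a different parent device (-EXDEV), an lport that is still live (-EEXIST), and a kref get that fails because the count already hit zero (treated as if the lport were fully deleted). A condensed sketch, with the nvme_fc_lock acquire/release around the walk elided:

        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != pinfo->node_name ||
                    lport->localport.port_name != pinfo->port_name)
                        continue;

                if (lport->dev != dev)
                        return ERR_PTR(-EXDEV);   /* same WWNs, other device */

                if (lport->localport.port_state != FC_OBJSTATE_DELETED)
                        return ERR_PTR(-EEXIST);  /* still registered */

                if (!nvme_fc_lport_get(lport))
                        return NULL;    /* act as if lport already deleted */

                /* resume the lport */
                lport->ops = ops;
                lport->localport.port_role = pinfo->port_role;
                lport->localport.port_id = pinfo->port_id;
                lport->localport.port_state = FC_OBJSTATE_ONLINE;
                return lport;
        }
        return NULL;    /* no match: caller allocates a fresh lport */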
374 /* found an lport, but something about its state is bad */
379 /* found existing lport, which was resumed */
457 struct nvme_fc_lport *lport = localport_to_lport(portptr);
473 if (atomic_read(&lport->act_rport_cnt) == 0)
474 lport->ops->localport_delete(&lport->localport);
476 nvme_fc_lport_put(lport);
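Lines 457-476 are the teardown half of that lifecycle: unregistering marks the localport FC_OBJSTATE_DELETED, but the LLDD's localport_delete() callback fires immediately only if no remote ports are still active; otherwise it is deferred until act_rport_cnt drains to zero (lines 2978-2983 further down). Sketch, locking elided:

        struct nvme_fc_lport *lport = localport_to_lport(portptr);

        portptr->port_state = FC_OBJSTATE_DELETED;

        if (atomic_read(&lport->act_rport_cnt) == 0)
                lport->ops->localport_delete(&lport->localport);

        nvme_fc_lport_put(lport);       /* drop the registration reference */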
493 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
505 lport->localport.node_name, lport->localport.port_name);
517 struct nvme_fc_lport *lport =
524 /* remove from lport list */
530 ida_free(&lport->endp_cnt, rport->remoteport.port_num);
534 nvme_fc_lport_put(lport);
581 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
590 list_for_each_entry(rport, &lport->endp_list, endp_list) {
668 struct nvme_fc_lport *lport = localport_to_lport(localport);
673 if (!nvme_fc_lport_get(lport)) {
683 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
692 nvme_fc_lport_put(lport);
694 nvme_fc_signal_discovery_scan(lport, newrec);
701 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
708 idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
721 newrec->remoteport.localport = &lport->localport;
723 newrec->dev = lport->dev;
724 newrec->lport = lport;
725 if (lport->ops->remote_priv_sz)
739 list_add_tail(&newrec->endp_list, &lport->endp_list);
742 nvme_fc_signal_discovery_scan(lport, newrec);
750 nvme_fc_lport_put(lport);
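Lines 668-750 show how a remote port registration pins its lport: a reference is taken up front, a suspended rport with matching names is resumed if nvme_fc_attach_to_suspended_rport() finds one (lines 581-590), and otherwise a new rport is allocated with the LLDD's per-rport private area tail-appended, given an endpoint index from the lport's IDA, and linked onto lport->endp_list. A sketch of the new-rport path; error values, the &newrec[1] tail pointer, and the unwind order are reconstructed, not quoted:

        if (!nvme_fc_lport_get(lport))
                return -ESHUTDOWN;      /* lport is already going away */

        newrec = kmalloc(sizeof(*newrec) + lport->ops->remote_priv_sz,
                         GFP_KERNEL);
        if (!newrec) {
                nvme_fc_lport_put(lport);
                return -ENOMEM;
        }

        idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
        if (idx < 0) {
                kfree(newrec);
                nvme_fc_lport_put(lport);
                return -ENOSPC;
        }

        newrec->remoteport.localport = &lport->localport;
        newrec->dev = lport->dev;       /* DMA is done against this device */
        newrec->lport = lport;
        if (lport->ops->remote_priv_sz)
                newrec->remoteport.private = &newrec[1];

        list_add_tail(&newrec->endp_list, &lport->endp_list);

        nvme_fc_signal_discovery_scan(lport, newrec);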
770 rport->lport->ops->ls_abort(&rport->lport->localport,
839 rport->lport->ops->remoteport_delete(portptr);
865 nvme_fc_signal_discovery_scan(rport->lport, rport);
1053 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1140 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1152 if (ctrl->lport->ops->lsrqst_priv_sz)
1256 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1268 if (ctrl->lport->ops->lsrqst_priv_sz)
1386 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1398 if (ctrl->lport->ops->lsrqst_priv_sz)
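Lines 1140-1398 repeat one allocation idiom three times: each LS operation is a single kzalloc() sized for the op structure, its request/response buffers, and the LLDD's lsrqst_priv_sz, with lsreq->private pointed at the tail when the LLDD asked for space and left NULL otherwise. Shape of the idiom (the rqst/acc buffer names are illustrative; each call site uses its own LS types):

        lsop = kzalloc(sizeof(*lsop) + sizeof(*rqst) + sizeof(*acc) +
                       ctrl->lport->ops->lsrqst_priv_sz, GFP_KERNEL);
        if (!lsop)
                return -ENOMEM;

        rqst = (void *)&lsop[1];        /* request buffer follows the op */
        acc = (void *)&rqst[1];         /* response buffer follows that */

        if (ctrl->lport->ops->lsrqst_priv_sz)
                lsreq->private = &acc[1];       /* LLDD space at the tail */
        else
                lsreq->private = NULL;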
1416 struct nvme_fc_lport *lport = rport->lport;
1423 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
1425 fc_dma_unmap_single(lport->dev, lsop->rspdma,
1447 struct nvme_fc_lport *lport = rport->lport;
1451 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
1454 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
1457 dev_warn(lport->dev,
1498 dev_info(rport->lport->dev,
1540 dev_info(rport->lport->dev,
1663 void nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport,
1666 dev_info(lport->dev, "RCV %s LS failed: No memory\n",
1696 struct nvme_fc_lport *lport = rport->lport;
1705 if (!lport->ops->xmt_ls_rsp) {
1706 dev_info(lport->dev,
1715 dev_info(lport->dev,
1725 nvme_fc_rcv_ls_req_err_msg(lport, w0);
1733 nvme_fc_rcv_ls_req_err_msg(lport, w0);
1738 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
1741 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
1742 dev_info(lport->dev,
1770 fc_dma_unmap_single(lport->dev, lsop->rspdma,
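Lines 1416-1457 and 1696-1770 together trace the DMA lifecycle of a received-LS response buffer, always against lport->dev (the fc_dma_* helpers in this file are thin wrappers over the dma_* API that tolerate a NULL device): map once when the LS arrives, sync for the device before handing the buffer to the LLDD's xmt_ls_rsp(), then sync back for the CPU and unmap on completion. Sketch; the third xmt_ls_rsp() argument is reconstructed:

        /* receive: map the response buffer the LLDD will transmit from */
        lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
                                         sizeof(*lsop->rspbuf),
                                         DMA_TO_DEVICE);
        if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
                dev_info(lport->dev,
                        "RCV LS failed: DMA mapping failure\n");
                /* ...reject the LS... */
        }

        /* transmit: make CPU writes visible, then call into the LLDD */
        fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
                                      sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
        ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
                                     lsop->lsrsp);
        if (ret)
                dev_warn(lport->dev, "LLDD rejected LS RSP xmt: %d\n", ret);

        /* completion: sync back for the CPU and release the mapping */
        fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
                                   sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
        fc_dma_unmap_single(lport->dev, lsop->rspdma,
                            sizeof(*lsop->rspbuf), DMA_TO_DEVICE);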
1789 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1791 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1825 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1945 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
2083 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
2085 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2092 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
2095 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
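The same device serves FCP I/O: lines 2083-2102 map each op's command and response IUs at queue setup, lines 1789-1791 unmap them at teardown, and lines 1945 and 2707 add the per-I/O sync calls around issue and completion. Mapping sketch with the standard error check (the cmd_iu/rsp_iu field names are reconstructed):

        op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
                        &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
        if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma))
                return -EFAULT;

        op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
                        &op->rsp_iu, sizeof(op->rsp_iu), DMA_FROM_DEVICE);
        if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma))
                return -EFAULT;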
2137 if (ctrl->lport->ops->fcprqst_priv_sz) {
2138 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
2263 if (ctrl->lport->ops->delete_queue)
2264 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2285 if (ctrl->lport->ops->create_queue)
2286 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2583 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2606 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2707 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2716 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2843 if (ctrl->lport->ops->map_queues)
2844 ctrl->lport->ops->map_queues(&ctrl->lport->localport,
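Lines 2263-2286 and 2843-2844 show that several LLDD hooks are optional: delete_queue, create_queue, and map_queues are called only when the ops table provides them, so a driver with no per-queue state can leave them NULL. Guarded-call sketch; the trailing arguments follow the nvme_fc_port_template ops in include/linux/nvme-fc-driver.h but are reconstructed here:

        if (ctrl->lport->ops->delete_queue)
                ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
                                               queue->lldd_handle);

        if (ctrl->lport->ops->create_queue)
                ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
                                qidx, qsize, &queue->lldd_handle);

        if (ctrl->lport->ops->map_queues)
                ctrl->lport->ops->map_queues(&ctrl->lport->localport, map);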
2869 ctrl->lport->ops->max_hw_queues);
2886 ctrl->lport->ops->fcprqst_priv_sz));
2923 ctrl->lport->ops->max_hw_queues);
2970 struct nvme_fc_lport *lport = rport->lport;
2972 atomic_inc(&lport->act_rport_cnt);
2978 struct nvme_fc_lport *lport = rport->lport;
2981 cnt = atomic_dec_return(&lport->act_rport_cnt);
2982 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2983 lport->ops->localport_delete(&lport->localport);
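Lines 2970-2983 close the loop opened at lines 473-474: each controller that comes up bumps lport->act_rport_cnt, and the final decrement performs the localport_delete() that nvme_fc_unregister_localport() had to defer while rports were still active. Sketch of the pair (helper names reconstructed from the matched bodies):

        static void
        nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
        {
                struct nvme_fc_lport *lport = rport->lport;

                atomic_inc(&lport->act_rport_cnt);
        }

        static void
        nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
        {
                struct nvme_fc_lport *lport = rport->lport;
                u32 cnt;

                cnt = atomic_dec_return(&lport->act_rport_cnt);
                /* last active rport gone: run the deferred delete */
                if (cnt == 0 &&
                    lport->localport.port_state == FC_OBJSTATE_DELETED)
                        lport->ops->localport_delete(&lport->localport);
        }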
3006 struct nvme_fc_lport *lport = rport->lport;
3014 lport->ops->remoteport_delete(&rport->remoteport);
3044 ctrl->cnum, ctrl->lport->localport.port_name,
3082 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3417 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3462 ctrl->lport = lport;
3464 ctrl->dev = lport->dev;
3480 lport->ops->max_hw_queues);
3505 if (lport->dev)
3506 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3524 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3530 ctrl = nvme_fc_alloc_ctrl(dev, opts, lport, rport);
3541 ctrl->lport->ops->fcprqst_priv_sz));
3665 struct nvme_fc_lport *lport;
3683 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3684 if (lport->localport.node_name != laddr.nn ||
3685 lport->localport.port_name != laddr.pn ||
3686 lport->localport.port_state != FC_OBJSTATE_ONLINE)
3689 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3701 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
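Lines 3665-3701 are the connect-time lookup: a controller binds only to an lport that is both name-matched and FC_OBJSTATE_ONLINE, then searches that lport's endp_list for the requested remote port. Sketch of the nested walk; the rport-side comparison never mentions lport, so it does not appear above and is reconstructed by analogy:

        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != laddr.nn ||
                    lport->localport.port_name != laddr.pn ||
                    lport->localport.port_state != FC_OBJSTATE_ONLINE)
                        continue;

                list_for_each_entry(rport, &lport->endp_list, endp_list) {
                        if (rport->remoteport.node_name != raddr.nn ||
                            rport->remoteport.port_name != raddr.pn)
                                continue;

                        /* found the local/remote pair: start the controller */
                        return nvme_fc_init_ctrl(dev, opts, lport, rport);
                }
        }
        return ERR_PTR(-ENOENT);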
3731 struct nvme_fc_lport *lport;
3737 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3738 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3739 if (!nvme_fc_lport_get(lport))
3746 * Revert the lport put and retry. Anything
3751 nvme_fc_lport_put(lport);
3773 lport = rport->lport;
3775 nvme_fc_signal_discovery_scan(lport, rport);
3777 nvme_fc_lport_put(lport);
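Lines 3731-3777 are the trickiest reference dance in the file: while walking every lport/rport pair under the list lock, the code takes an lport reference before it can later drop the lock to signal discovery, and a failed rport get means reverting that lport get and restarting the walk rather than touching a dying object. Condensed skeleton of the pattern (the disc_list/local_disc_list bookkeeping is reconstructed, lock acquire/release is elided, and references are taken only when an rport is queued so the drain loop balances them):

        restart:
        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                list_for_each_entry(rport, &lport->endp_list, endp_list) {
                        if (!list_empty(&rport->disc_list))
                                continue;       /* already queued */
                        if (!nvme_fc_lport_get(lport))
                                continue;       /* lport dying: skip it */
                        if (!nvme_fc_rport_get(rport)) {
                                /* revert the lport get, rescan the lists */
                                nvme_fc_lport_put(lport);
                                goto restart;
                        }
                        list_add_tail(&rport->disc_list, &local_disc_list);
                }
        }

        /* out of the list lock: now safe to call into the LLDD */
        while (!list_empty(&local_disc_list)) {
                rport = list_first_entry(&local_disc_list,
                                         struct nvme_fc_rport, disc_list);
                list_del_init(&rport->disc_list);

                lport = rport->lport;
                nvme_fc_signal_discovery_scan(lport, rport);
                nvme_fc_rport_put(rport);
                nvme_fc_lport_put(lport);
        }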
3935 struct nvme_fc_lport *lport;
3940 list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
3941 list_for_each_entry(rport, &lport->endp_list, endp_list)