Lines Matching defs:ndlp

79 lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
81 if (ndlp->nlp_fc4_type ||
82 ndlp->nlp_type & NLP_FABRIC)
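(The listing only shows the matching lines; a minimal sketch of the full predicate, reconstructed from them with the elided lines assumed, would look like this.)

	static int
	lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
	{
		/* A node is worth registering with the SCSI/NVMe
		 * transports if it carries an FC4 type or is a
		 * fabric node.
		 */
		if (ndlp->nlp_fc4_type ||
		    ndlp->nlp_type & NLP_FABRIC)
			return 1;
		return 0;
	}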
96 struct lpfc_nodelist *ndlp;
116 ndlp = rdata->pnode;
118 pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
123 if (!ndlp->vport) {
124 pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
125 "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
136 struct lpfc_nodelist *ndlp;
143 ndlp = rdata->pnode;
144 vport = ndlp->vport;
147 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
149 if (ndlp->nlp_sid != NLP_NO_SID)
150 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
159 struct lpfc_nodelist *ndlp;
166 ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
167 if (!ndlp)
170 vport = ndlp->vport;
175 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
177 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
180 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
181 vport->load_flag, kref_read(&ndlp->kref),
182 ndlp->nlp_state, ndlp->fc4_xpt_flags);
189 spin_lock_irqsave(&ndlp->lock, iflags);
190 ndlp->rport = NULL;
196 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
197 if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
203 if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
204 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
212 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
213 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
214 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
215 spin_unlock_irqrestore(&ndlp->lock, iflags);
218 lpfc_nlp_put(ndlp);
220 spin_unlock_irqrestore(&ndlp->lock, iflags);
223 spin_unlock_irqrestore(&ndlp->lock, iflags);
227 lpfc_nlp_put(ndlp);
231 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
235 if (ndlp->rport != rport) {
237 "6788 fc rport mismatch: d_id x%06x ndlp x%px "
240 ndlp->nlp_DID, ndlp, rport, ndlp->rport,
241 ndlp->nlp_state, kref_read(&ndlp->kref));
245 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
249 wwn_to_u64(ndlp->nlp_portname.u.wwn));
251 evtp = &ndlp->dev_loss_evt;
260 set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
262 spin_lock_irqsave(&ndlp->lock, iflags);
266 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
267 clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
271 * rport. Remove the association between rport and ndlp.
273 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
275 ndlp->rport = NULL;
276 spin_unlock_irqrestore(&ndlp->lock, iflags);
282 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
294 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
297 "%d\n", __func__, ndlp->nlp_DID,
298 ndlp->rport, ndlp->nlp_flag,
299 vport->load_flag, kref_read(&ndlp->kref));
300 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
302 clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
303 lpfc_disc_state_machine(vport, ndlp, NULL,
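(Lines 251-282 above outline the hand-off from the transport callback to the worker thread: the dev_loss event embedded in the ndlp is queued with a node reference as its argument. A condensed sketch of that pattern, locking and error paths elided and details assumed:)

	evtp = &ndlp->dev_loss_evt;
	set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);

	/* The queued event owns a node reference for the worker. */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}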
414 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
416 * @ndlp: Pointer to remote node object.
424 struct lpfc_nodelist *ndlp)
426 if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
427 lpfc_nlp_get(ndlp);
430 "refcnt %d ndlp %p flag x%lx "
432 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
433 ndlp->nlp_flag, vport->port_state);
439 * @ndlp: Pointer to remote node object.
443 * remote node, including this @ndlp, is still using the FCF; otherwise, this
445 * when the devloss timeout occurred for this @ndlp.
448 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
459 vport = ndlp->vport;
460 name = (uint8_t *)&ndlp->nlp_portname;
468 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
470 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
472 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
473 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
476 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
483 ndlp->nlp_DID);
485 clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
490 if (ndlp->nlp_type & NLP_FABRIC) {
491 spin_lock_irqsave(&ndlp->lock, iflags);
497 switch (ndlp->nlp_DID) {
513 if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
519 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
520 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
528 if (ndlp->nlp_DID & Fabric_DID_MASK) {
529 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
530 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
535 spin_unlock_irqrestore(&ndlp->lock, iflags);
541 clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
546 "DID x%x refcnt %d ndlp %p "
548 ndlp->nlp_DID, kref_read(&ndlp->kref),
549 ndlp, ndlp->nlp_flag,
551 set_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags);
553 } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
561 "DID x%x refcnt %d ndlp %p "
563 ndlp->nlp_DID, kref_read(&ndlp->kref),
564 ndlp, ndlp->nlp_flag,
569 lpfc_nlp_put(ndlp);
573 if (ndlp->nlp_sid != NLP_NO_SID) {
575 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
585 ndlp->nlp_DID, ndlp->nlp_flag,
586 ndlp->nlp_state, ndlp->nlp_rpi,
587 kref_read(&ndlp->kref));
595 ndlp->nlp_DID, ndlp->nlp_flag,
596 ndlp->nlp_state, ndlp->nlp_rpi);
598 clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
601 * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
603 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
604 ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
608 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
609 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
647 * timeout handler and dropping the reference held on the ndlp with
833 struct lpfc_nodelist *ndlp;
848 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
850 lpfc_els_retry_delay_handler(ndlp);
851 free_evt = 0; /* evt is part of ndlp */
856 lpfc_nlp_put(ndlp);
859 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
860 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
865 nlp_did = ndlp->nlp_DID;
866 lpfc_nlp_put(ndlp);
873 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
875 lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
881 lpfc_nlp_put(ndlp);
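(The fragment at lines 848-881 is the worker-side consumer of those events. The invariant worth noting: evt_arg1 carries the reference taken when the event was queued, and every handled event must drop it. A sketch of the dev-loss case, with the dispatch context assumed and the post-handler name following the SLI-4 convention seen elsewhere in the driver:)

	case LPFC_EVT_DEV_LOSS:
		ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
		fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
		free_evt = 0;		/* evt is embedded in the ndlp */
		nlp_did = ndlp->nlp_DID;
		lpfc_nlp_put(ndlp);	/* drop the queued reference */
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_post_dev_loss_tmo_handler(phba, fcf_inuse,
							    nlp_did);
		break;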
1163 struct lpfc_nodelist *ndlp, *next_ndlp;
1165 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1168 ((ndlp->nlp_DID == NameServer_DID) ||
1169 (ndlp->nlp_DID == FDMI_DID) ||
1170 (ndlp->nlp_DID == Fabric_Cntl_DID))))
1171 lpfc_unreg_rpi(vport, ndlp);
1175 (!remove && ndlp->nlp_type & NLP_FABRIC))
1180 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
1181 lpfc_nvmet_invalidate_host(phba, ndlp);
1183 lpfc_disc_state_machine(vport, ndlp, NULL,
1261 /* Drop the reference held on the ndlp if there is a deferred FLOGI acc */
1263 if (phba->defer_flogi_acc.ndlp) {
1264 lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
1265 phba->defer_flogi_acc.ndlp = NULL;
1353 struct lpfc_nodelist *ndlp;
1355 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1356 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
1358 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1360 if (ndlp->nlp_type & NLP_FABRIC) {
1361 /* On Linkup it's safe to clean up the ndlp
1364 if (ndlp->nlp_DID != Fabric_DID)
1365 lpfc_unreg_rpi(vport, ndlp);
1366 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1367 } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
1371 lpfc_unreg_rpi(vport, ndlp);
3223 struct lpfc_nodelist *ndlp;
3237 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3238 if (!ndlp)
3244 lpfc_register_new_vport(phba, vport, ndlp);
3862 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
3873 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3874 kref_read(&ndlp->kref),
3875 ndlp);
3876 clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
3878 if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) ||
3879 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3888 clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
3892 * if we go through discovery again for this ndlp
3895 set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
3896 lpfc_unreg_rpi(vport, ndlp);
3900 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3907 lpfc_nlp_put(ndlp);
4176 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
4191 /* Decrement the reference count to the ndlp after all
4192 * references to the ndlp are done.
4194 lpfc_nlp_put(ndlp);
4199 /* Decrement the reference count to the ndlp after all references
4200 * to the ndlp are done.
4202 lpfc_nlp_put(ndlp);
4207 ndlp->nlp_rpi = mb->un.varWords[0];
4208 set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
4209 ndlp->nlp_type |= NLP_FABRIC;
4210 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4223 * all current references to the ndlp are done.
4225 lpfc_nlp_put(ndlp);
4314 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
4330 lpfc_nlp_put(ndlp);
4338 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
4339 clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
4340 lpfc_nlp_put(ndlp);
4359 ndlp->nlp_rpi = mb->un.varWords[0];
4360 set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
4361 ndlp->nlp_type |= NLP_FABRIC;
4362 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4365 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4366 kref_read(&ndlp->kref),
4367 ndlp);
4421 lpfc_nlp_put(ndlp);
4436 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
4446 lpfc_check_nlp_post_devloss(vport, ndlp);
4449 ndlp->nlp_rpi = mb->un.varWords[0];
4453 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
4454 ndlp->nlp_state);
4456 set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
4457 clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
4458 ndlp->nlp_type |= NLP_FABRIC;
4459 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4465 * all current references to the ndlp are done.
4467 lpfc_nlp_put(ndlp);
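(The REG_LOGIN completion handlers excerpted between lines 3862 and 4467 share one pattern: on success, latch the HBA-assigned RPI, mark the RPI registered, promote the node to UNMAPPED, then drop the reference the mailbox held on the ndlp. A minimal sketch of that common shape — a composite, not any one of the real handlers, with error paths elided:)

	static void
	example_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
	{
		struct lpfc_vport *vport = pmb->vport;
		struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
		MAILBOX_t *mb = &pmb->u.mb;

		/* SLI-3 returns the RPI in the mailbox; on SLI-4 the
		 * driver assigned it when the mailbox was built.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4)
			ndlp->nlp_rpi = mb->un.varWords[0];
		set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

		/* Drop the reference taken when the mailbox was queued. */
		lpfc_nlp_put(ndlp);
	}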
4471 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4484 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4485 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4486 rport_ids.port_id = ndlp->nlp_DID;
4492 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4498 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4506 rport->maxframe_size = ndlp->nlp_maxframe;
4507 rport->supported_classes = ndlp->nlp_class_sup;
4509 rdata->pnode = lpfc_nlp_get(ndlp);
4514 ndlp->rport = NULL;
4518 spin_lock_irqsave(&ndlp->lock, flags);
4519 ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
4520 spin_unlock_irqrestore(&ndlp->lock, flags);
4522 if (ndlp->nlp_type & NLP_FCP_TARGET)
4524 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4526 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4528 if (ndlp->nlp_type & NLP_NVME_TARGET)
4530 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4536 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4539 kref_read(&ndlp->kref));
4543 ndlp->nlp_sid = rport->scsi_target_id;
4550 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4552 struct fc_rport *rport = ndlp->rport;
4553 struct lpfc_vport *vport = ndlp->vport;
4560 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4565 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
4566 kref_read(&ndlp->kref));
4569 lpfc_nlp_put(ndlp);
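(Between the log calls at lines 4560-4566, the elided body hands the rport back to the FC transport. A sketch of that hand-off, assuming the standard fc_remote_port_delete() transport call:)

	/* Let the FC transport tear the rport down; the lpfc_nlp_put()
	 * at line 4569 then drops the reference that rdata->pnode took
	 * in lpfc_register_remote_port().
	 */
	fc_remote_port_delete(rport);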
4608 lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4612 lpfc_check_nlp_post_devloss(vport, ndlp);
4614 spin_lock_irqsave(&ndlp->lock, iflags);
4615 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
4617 spin_unlock_irqrestore(&ndlp->lock, iflags);
4619 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
4620 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
4621 lpfc_nvme_rescan_port(vport, ndlp);
4626 ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
4627 spin_unlock_irqrestore(&ndlp->lock, iflags);
4629 if (lpfc_valid_xpt_node(ndlp)) {
4635 lpfc_register_remote_port(vport, ndlp);
4639 if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
4644 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4650 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4652 lpfc_nvme_register_port(vport, ndlp);
4658 lpfc_nlp_get(ndlp);
4665 lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4669 spin_lock_irqsave(&ndlp->lock, iflags);
4670 if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
4671 spin_unlock_irqrestore(&ndlp->lock, iflags);
4674 "0999 %s Not regd: ndlp x%px rport x%px DID "
4676 __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
4677 ndlp->nlp_flag, ndlp->fc4_xpt_flags);
4681 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
4682 spin_unlock_irqrestore(&ndlp->lock, iflags);
4684 if (ndlp->rport &&
4685 ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
4687 lpfc_unregister_remote_port(ndlp);
4688 } else if (!ndlp->rport) {
4693 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
4694 ndlp->fc4_xpt_flags,
4695 kref_read(&ndlp->kref));
4698 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
4701 lpfc_nvme_unregister_port(vport, ndlp);
4704 lpfc_nlp_put(ndlp);
4714 lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4731 ndlp->nlp_type |= NLP_FC_NODE;
4734 clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
4735 lpfc_nlp_reg_node(vport, ndlp);
4745 clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
4748 lpfc_nlp_unreg_node(vport, ndlp);
4755 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4761 lpfc_handle_adisc_state(vport, ndlp, new_state);
4766 clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
4767 ndlp->nlp_type |= NLP_FC_NODE;
4770 clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
4772 clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
4780 if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) ||
4782 lpfc_nlp_unreg_node(vport, ndlp);
4787 lpfc_nlp_reg_node(vport, ndlp);
4796 (ndlp->nlp_type & NLP_FCP_TARGET) &&
4797 (!ndlp->rport ||
4798 ndlp->rport->scsi_target_id == -1 ||
4799 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4800 set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
4801 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4828 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4831 int old_state = ndlp->nlp_state;
4832 bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag);
4838 ndlp->nlp_DID,
4844 ndlp->nlp_DID, old_state, state);
4848 clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
4849 lpfc_nlp_get(ndlp);
4854 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4856 clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
4857 ndlp->nlp_type &= ~NLP_FC_NODE;
4860 if (list_empty(&ndlp->nlp_listp)) {
4862 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4867 ndlp->nlp_state = state;
4869 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4873 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4877 if (list_empty(&ndlp->nlp_listp)) {
4879 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4885 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4889 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4890 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4891 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4893 list_del_init(&ndlp->nlp_listp);
4895 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4902 * @ndlp: Pointer to FC node object.
4907 * to phba from @ndlp can be obtained indirectly through its reference to
4908 * @vport, a direct reference to phba is taken here by @ndlp. This is
4909 * because the life-span of the @ndlp might extend beyond that of @vport,
4910 * since the final release of the ndlp is determined by its reference count,
4911 * and operations on @ndlp need the reference to phba.
4914 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4917 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4918 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4919 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4920 INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4922 ndlp->nlp_DID = did;
4923 ndlp->vport = vport;
4924 ndlp->phba = vport->phba;
4925 ndlp->nlp_sid = NLP_NO_SID;
4926 ndlp->nlp_fc4_type = NLP_FC4_NONE;
4927 kref_init(&ndlp->kref);
4928 atomic_set(&ndlp->cmd_pending, 0);
4929 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4930 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4934 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4939 * release the ndlp from the vport when conditions are correct.
4941 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4943 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4945 lpfc_cleanup_vports_rrqs(vport, ndlp);
4946 lpfc_unreg_rpi(vport, ndlp);
4953 if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
4954 lpfc_nlp_put(ndlp);
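(The test_and_set_bit(NLP_DROPPED, ...) guard at line 4953, and again at line 197, is the idiom that makes the initial discovery reference safe to drop from multiple paths:)

	/* Whichever path sets NLP_DROPPED first performs the one and
	 * only put of the initial reference; later callers see the
	 * bit already set and do nothing.
	 */
	if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
		lpfc_nlp_put(ndlp);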
5038 struct lpfc_nodelist *ndlp)
5040 struct lpfc_vport *vport = ndlp->vport;
5055 if (iocb->ndlp == ndlp)
5059 if (remote_id == ndlp->nlp_DID)
5063 if (iocb->ndlp == ndlp)
5068 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
5069 test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))
5072 if (ulp_context == ndlp->nlp_rpi)
5080 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
5087 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
5095 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5102 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
5109 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5120 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
5131 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5135 lpfc_fabric_abort_nport(ndlp);
5141 if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
5143 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
5145 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
5167 struct lpfc_nodelist *ndlp;
5169 ndlp = pmb->ctx_ndlp;
5170 if (!ndlp)
5172 lpfc_issue_els_logo(vport, ndlp, 0);
5175 if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) &&
5176 ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
5180 ndlp->nlp_rpi, ndlp->nlp_DID,
5181 ndlp->nlp_defer_did, ndlp);
5183 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
5184 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
5185 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5187 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
5194 lpfc_nlp_put(ndlp);
5205 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
5210 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5214 if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) {
5220 (kref_read(&ndlp->kref) > 0)) {
5237 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5244 if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) ||
5245 test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) {
5246 if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
5252 ndlp->nlp_rpi, ndlp->nlp_flag,
5253 ndlp->nlp_DID);
5255 /* If there is already an UNREG in progress for this ndlp,
5258 if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) {
5264 ndlp->nlp_rpi, ndlp->nlp_DID,
5265 ndlp->nlp_defer_did,
5266 ndlp->nlp_flag, ndlp);
5273 rpi = ndlp->nlp_rpi;
5275 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5279 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
5290 set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
5297 ndlp->nlp_rpi, ndlp->nlp_DID,
5298 ndlp->nlp_flag, ndlp);
5302 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
5305 lpfc_nlp_put(ndlp);
5313 "ndlp x%px\n",
5314 ndlp->nlp_rpi, ndlp->nlp_DID,
5315 ndlp->nlp_flag, ndlp);
5322 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
5323 lpfc_issue_els_logo(vport, ndlp, 0);
5324 ndlp->nlp_prev_state = ndlp->nlp_state;
5325 lpfc_nlp_set_state(vport, ndlp,
5331 lpfc_no_rpi(phba, ndlp);
5334 ndlp->nlp_rpi = 0;
5335 clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
5336 clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
5338 clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
5341 clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
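(Condensed shape of the RPI unregister path excerpted above — SLI-4 RPI translation, the LOGO special case, and most error handling elided; a sketch under those assumptions, not the full function:)

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
		mbox->vport = vport;
		lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
		set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
			mempool_free(mbox, phba->mbox_mem_pool);
			/* Drop the reference the completion would have put. */
			lpfc_nlp_put(ndlp);
		}
	}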
5356 struct lpfc_nodelist *ndlp;
5368 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5369 if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
5373 lpfc_unreg_rpi(vports[i], ndlp);
5449 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5458 ndlp->nlp_DID, ndlp->nlp_flag,
5459 ndlp->nlp_state, ndlp->nlp_rpi);
5460 lpfc_dequeue_node(vport, ndlp);
5464 /* Clean up any ndlp on the mbox queue waiting for REG_LOGIN cmpl */
5468 (ndlp == mb->ctx_ndlp)) {
5479 (ndlp != mb->ctx_ndlp))
5489 (ndlp == mb->ctx_ndlp)) {
5500 lpfc_els_abort(phba, ndlp);
5502 clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
5504 ndlp->nlp_last_elscmd = 0;
5505 timer_delete_sync(&ndlp->nlp_delayfunc);
5507 list_del_init(&ndlp->els_retry_evt.evt_listp);
5508 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5509 list_del_init(&ndlp->recovery_evt.evt_listp);
5510 lpfc_cleanup_vports_rrqs(vport, ndlp);
5515 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5524 if (ndlp->nlp_DID == did)
5534 ndlpdid.un.word = ndlp->nlp_DID;
5543 * up matching ndlp->nlp_DID 000001 to
5556 matchdid.un.word = ndlp->nlp_DID;
5573 struct lpfc_nodelist *ndlp;
5577 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5578 if (lpfc_matchdid(vport, ndlp, did)) {
5579 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5580 ((uint32_t)ndlp->nlp_xri << 16) |
5581 ((uint32_t)ndlp->nlp_type << 8)
5586 ndlp, ndlp->nlp_DID,
5587 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5588 ndlp->active_rrqs_xri_bitmap);
5591 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
5592 return ndlp;
5593 np = ndlp;
5609 struct lpfc_nodelist *ndlp;
5613 ndlp = __lpfc_findnode_did(vport, did);
5615 return ndlp;
5621 struct lpfc_nodelist *ndlp;
5627 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5628 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5629 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5630 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5631 ((uint32_t)ndlp->nlp_xri << 16) |
5632 ((uint32_t)ndlp->nlp_type << 8) |
5633 ((uint32_t)ndlp->nlp_rpi & 0xff));
5639 ndlp, ndlp->nlp_DID,
5640 ndlp->nlp_flag, data1,
5641 ndlp->active_rrqs_xri_bitmap);
5642 return ndlp;
5656 struct lpfc_nodelist *ndlp;
5658 ndlp = lpfc_findnode_did(vport, did);
5659 if (!ndlp) {
5665 ndlp = lpfc_nlp_init(vport, did);
5666 if (!ndlp)
5668 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5673 ndlp->nlp_DID, ndlp->nlp_flag,
5674 ndlp->nlp_state, vport->fc_flag);
5676 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
5677 return ndlp;
5691 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5696 ndlp->nlp_DID, ndlp->nlp_flag,
5697 ndlp->nlp_state, vport->fc_flag);
5705 return ndlp;
5707 if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
5708 ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
5709 lpfc_disc_state_machine(vport, ndlp, NULL,
5713 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
5718 ndlp->nlp_DID, ndlp->nlp_flag,
5719 ndlp->nlp_state, vport->fc_flag);
5720 ndlp = NULL;
5726 ndlp->nlp_DID, ndlp->nlp_flag,
5727 ndlp->nlp_state, vport->fc_flag);
5733 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5734 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5736 test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)))
5740 return ndlp;
5745 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5746 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
5748 return ndlp;
5947 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5963 if (iocb->ndlp != ndlp)
5977 if (iocb->ndlp != ndlp)
6000 struct lpfc_nodelist *ndlp, *next_ndlp;
6005 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6007 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
6008 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
6009 lpfc_free_tx(phba, ndlp);
6020 * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
6028 struct lpfc_nodelist *ndlp, *next_ndlp;
6030 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6032 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6087 struct lpfc_nodelist *ndlp, *next_ndlp;
6109 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6111 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
6113 if (ndlp->nlp_type & NLP_FABRIC) {
6114 /* Clean up the ndlp on Fabric connections */
6115 lpfc_drop_node(vport, ndlp);
6117 } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
6121 lpfc_unreg_rpi(vport, ndlp);
6160 /* Next look for NameServer ndlp */
6161 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6162 if (ndlp)
6163 lpfc_els_abort(phba, ndlp);
6323 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
6329 ndlp->nlp_rpi = mb->un.varWords[0];
6330 set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
6331 ndlp->nlp_type |= NLP_FABRIC;
6332 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
6335 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
6336 kref_read(&ndlp->kref),
6337 ndlp);
6346 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
6348 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6355 lpfc_nlp_put(ndlp);
6361 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6365 return ndlp->nlp_rpi == *rpi;
6369 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6371 return memcmp(&ndlp->nlp_portname, param,
6372 sizeof(ndlp->nlp_portname)) == 0;
6378 struct lpfc_nodelist *ndlp;
6380 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6381 if (filter(ndlp, param)) {
6384 "ndlp x%px did x%x flg x%lx st x%x "
6386 filter, ndlp, ndlp->nlp_DID,
6387 ndlp->nlp_flag, ndlp->nlp_state,
6388 ndlp->nlp_xri, ndlp->nlp_type,
6389 ndlp->nlp_rpi);
6390 return ndlp;
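(The two filters at lines 6361-6372 plug into the generic walker shown above. A usage sketch; the RPI value is hypothetical:)

	uint16_t rpi = 0x2a;	/* hypothetical RPI to look up */
	struct lpfc_nodelist *ndlp;

	/* Call with the appropriate host lock held, as the
	 * double-underscore prefix on the walker suggests.
	 */
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);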
6399 * This routine looks up the ndlp lists for the given RPI. If the RPI is found, it
6409 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is found, it
6416 struct lpfc_nodelist *ndlp;
6419 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6421 return ndlp;
6425 * This routine looks up the ndlp lists for the given RPI. If the rpi
6433 struct lpfc_nodelist *ndlp;
6437 ndlp = __lpfc_findnode_rpi(vport, rpi);
6439 return ndlp;
6495 struct lpfc_nodelist *ndlp;
6504 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6505 if (!ndlp) {
6511 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
6513 spin_lock_init(&ndlp->lock);
6515 lpfc_initialize_node(vport, ndlp, did);
6516 INIT_LIST_HEAD(&ndlp->nlp_listp);
6518 ndlp->nlp_rpi = rpi;
6521 "0007 Init New ndlp x%px, rpi:x%x DID:x%x "
6523 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6524 ndlp->nlp_flag, kref_read(&ndlp->kref));
6526 ndlp->active_rrqs_xri_bitmap =
6529 if (ndlp->active_rrqs_xri_bitmap)
6530 memset(ndlp->active_rrqs_xri_bitmap, 0,
6531 ndlp->phba->cfg_rrq_xri_bitmap_sz);
6538 ndlp->nlp_DID, 0, 0);
6540 return ndlp;
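(Typical creation path, mirroring lines 5665-5676 of lpfc_setup_disc_node above:)

	ndlp = lpfc_nlp_init(vport, did);
	if (!ndlp)
		return NULL;
	/* New nodes start in NPR and are flagged for discovery. */
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);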
6543 /* This routine releases all resources associated with a specific NPort's ndlp
6549 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6551 struct lpfc_vport *vport = ndlp->vport;
6553 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6555 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6558 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
6559 __func__, ndlp, ndlp->nlp_DID,
6560 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6562 /* remove ndlp from action. */
6563 lpfc_cancel_retry_delay_tmo(vport, ndlp);
6564 lpfc_cleanup_node(vport, ndlp);
6570 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
6571 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
6577 ndlp->vport = NULL;
6578 ndlp->nlp_state = NLP_STE_FREED_NODE;
6579 ndlp->nlp_flag = 0;
6580 ndlp->fc4_xpt_flags = 0;
6582 /* free ndlp memory for final ndlp release */
6583 if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
6584 mempool_free(ndlp->active_rrqs_xri_bitmap,
6585 ndlp->phba->active_rrq_pool);
6586 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6589 /* This routine bumps the reference count for an ndlp structure to ensure
6590 * that one discovery thread won't free an ndlp while another discovery thread
6594 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6598 if (ndlp) {
6599 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6601 ndlp->nlp_DID, ndlp->nlp_flag,
6602 kref_read(&ndlp->kref));
6604 /* This check of ndlp usage prevents incrementing an
6605 * ndlp reference count that is in the process of being
6608 spin_lock_irqsave(&ndlp->lock, flags);
6609 if (!kref_get_unless_zero(&ndlp->kref)) {
6610 spin_unlock_irqrestore(&ndlp->lock, flags);
6611 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6612 "0276 %s: ndlp:x%px refcnt:%d\n",
6613 __func__, (void *)ndlp, kref_read(&ndlp->kref));
6616 spin_unlock_irqrestore(&ndlp->lock, flags);
6618 WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
6621 return ndlp;
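(The shape of the guarded get at lines 6608-6616, reduced to the idiom: the node lock plus kref_get_unless_zero() means a caller can never resurrect an ndlp whose last reference is already gone. A minimal sketch:)

	spin_lock_irqsave(&ndlp->lock, flags);
	got = kref_get_unless_zero(&ndlp->kref);
	spin_unlock_irqrestore(&ndlp->lock, flags);
	if (!got)
		return NULL;	/* mid-release: no reference taken */
	return ndlp;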
6624 /* This routine decrements the reference count for an ndlp structure. If the
6628 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6630 if (ndlp) {
6631 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6633 ndlp->nlp_DID, ndlp->nlp_flag,
6634 kref_read(&ndlp->kref));
6636 WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
6639 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
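(Note on the return at line 6639: kref_put() returns 1 only when this call dropped the final reference and lpfc_nlp_release() ran, and 0 otherwise, so callers of lpfc_nlp_put() can tell whether theirs was the releasing put.)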
6657 struct lpfc_nodelist *ndlp;
6678 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6679 if (ndlp->rport &&
6680 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6686 &ndlp->nlp_flag)) {
6692 ndlp->nlp_rpi, ndlp->nlp_DID,
6693 ndlp->nlp_flag);
6760 struct lpfc_nodelist *ndlp;
6776 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6777 if (ndlp)
6778 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6791 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6792 if (ndlp)
6793 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);