1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_transport_fc.h>
34 #include <uapi/scsi/fc/fc_fs.h>
35 #include <uapi/scsi/fc/fc_els.h>
36
37 #include "lpfc_hw4.h"
38 #include "lpfc_hw.h"
39 #include "lpfc_sli.h"
40 #include "lpfc_sli4.h"
41 #include "lpfc_nl.h"
42 #include "lpfc_disc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_crtn.h"
47 #include "lpfc_vport.h"
48 #include "lpfc_debugfs.h"
49
50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
51 struct lpfc_iocbq *);
52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
53 struct lpfc_iocbq *);
54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
56 struct lpfc_nodelist *ndlp, uint8_t retry);
57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
58 struct lpfc_iocbq *iocb);
59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
60 struct lpfc_iocbq *cmdiocb,
61 struct lpfc_iocbq *rspiocb);
62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
63 struct lpfc_iocbq *);
64
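/* Default maximum number of times the driver retries an ELS command */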
65 static int lpfc_max_els_tries = 3;
66
67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
70
71 /**
72 * lpfc_els_chk_latt - Check host link attention event for a vport
73 * @vport: pointer to a host virtual N_Port data structure.
74 *
75 * This routine checks whether there is an outstanding host link
76 * attention event during the discovery process with the @vport. It is done
77 * by reading the HBA's Host Attention (HA) register. If any host
78 * link attention event occurred during this @vport's discovery process, the @vport
79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
80 * be issued if the link state is not already in host link cleared state,
81 * and a return code shall indicate whether the host link attention event
82 * had happened.
83 *
84 * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
85 * state is LPFC_VPORT_READY, the request for checking the host link attention
86 * event will be ignored and a return code shall indicate no host link
87 * attention event had happened.
88 *
89 * Return codes
90 * 0 - no host link attention event happened
91 * 1 - host link attention event happened
92 **/
93 int
94 lpfc_els_chk_latt(struct lpfc_vport *vport)
95 {
96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
97 struct lpfc_hba *phba = vport->phba;
98 uint32_t ha_copy;
99
100 if (vport->port_state >= LPFC_VPORT_READY ||
101 phba->link_state == LPFC_LINK_DOWN ||
102 phba->sli_rev > LPFC_SLI_REV3)
103 return 0;
104
105 /* Read the HBA Host Attention Register */
106 if (lpfc_readl(phba->HAregaddr, &ha_copy))
107 return 1;
108
109 if (!(ha_copy & HA_LATT))
110 return 0;
111
112 /* Pending Link Event during Discovery */
113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
114 "0237 Pending Link Event during "
115 "Discovery: State x%x\n",
116 phba->pport->port_state);
117
118 /* CLEAR_LA should re-enable link attention events and
119 * we should then immediately take a LATT event. The
120 * LATT processing should call lpfc_linkdown() which
121 * will cleanup any left over in-progress discovery
122 * events.
123 */
124 spin_lock_irq(shost->host_lock);
125 vport->fc_flag |= FC_ABORT_DISCOVERY;
126 spin_unlock_irq(shost->host_lock);
127
128 if (phba->link_state != LPFC_CLEAR_LA)
129 lpfc_issue_clear_la(phba, vport);
130
131 return 1;
132 }
133
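/* Return true when the ELS payload in @buf is an LS_ACC response */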
134 static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
135 {
136 struct fc_els_ls_acc *rsp = buf->virt;
137
138 if (rsp && rsp->la_cmd == ELS_LS_ACC)
139 return true;
140 return false;
141 }
142
143 /**
144 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
145 * @vport: pointer to a host virtual N_Port data structure.
146 * @expect_rsp: flag indicating whether response is expected.
147 * @cmd_size: size of the ELS command.
148 * @retry: number of retries to the command when it fails.
149 * @ndlp: pointer to a node-list data structure.
150 * @did: destination identifier.
151 * @elscmd: the ELS command code.
152 *
153 * This routine allocates an lpfc-IOCB data structure from
154 * the driver lpfc-IOCB free-list and prepares the IOCB with the parameters
155 * passed into the routine for the discovery state machine to issue an Extended
156 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation
157 * and preparation routine used by all the discovery state machine
158 * routines; the ELS command-specific fields are set up later by
159 * the individual discovery state machine routines after this routine has
160 * allocated and prepared a generic IOCB data structure. It fills in the
161 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
162 * payload and response payload (if expected). The reference count on the
163 * ndlp is incremented by 1 and the reference is stored in the ndlp field
164 * of the IOCB data structure so that the command's completion callback
165 * function can access it later.
166 *
167 * Return code
168 * Pointer to the newly allocated/prepared els iocb data structure
169 * NULL - when els iocb data structure allocation/preparation failed
170 **/
171 struct lpfc_iocbq *
172 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
173 u16 cmd_size, u8 retry,
174 struct lpfc_nodelist *ndlp, u32 did,
175 u32 elscmd)
176 {
177 struct lpfc_hba *phba = vport->phba;
178 struct lpfc_iocbq *elsiocb;
179 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
180 struct ulp_bde64_le *bpl;
181 u32 timeout = 0;
182
183 if (!lpfc_is_link_up(phba))
184 return NULL;
185
186 /* Allocate buffer for command iocb */
187 elsiocb = lpfc_sli_get_iocbq(phba);
188 if (!elsiocb)
189 return NULL;
190
191 /*
192 * If this command is for fabric controller and HBA running
193 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
194 */
195 if ((did == Fabric_DID) &&
196 (phba->hba_flag & HBA_FIP_SUPPORT) &&
197 ((elscmd == ELS_CMD_FLOGI) ||
198 (elscmd == ELS_CMD_FDISC) ||
199 (elscmd == ELS_CMD_LOGO)))
200 switch (elscmd) {
201 case ELS_CMD_FLOGI:
202 elsiocb->cmd_flag |=
203 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
204 & LPFC_FIP_ELS_ID_MASK);
205 break;
206 case ELS_CMD_FDISC:
207 elsiocb->cmd_flag |=
208 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
209 & LPFC_FIP_ELS_ID_MASK);
210 break;
211 case ELS_CMD_LOGO:
212 elsiocb->cmd_flag |=
213 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
214 & LPFC_FIP_ELS_ID_MASK);
215 break;
216 }
217 else
218 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
219
220 /* fill in BDEs for command */
221 /* Allocate buffer for command payload */
222 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
223 if (pcmd)
224 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
225 if (!pcmd || !pcmd->virt)
226 goto els_iocb_free_pcmb_exit;
227
228 INIT_LIST_HEAD(&pcmd->list);
229
230 /* Allocate buffer for response payload */
231 if (expect_rsp) {
232 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
233 if (prsp)
234 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
235 &prsp->phys);
236 if (!prsp || !prsp->virt)
237 goto els_iocb_free_prsp_exit;
238 INIT_LIST_HEAD(&prsp->list);
239 } else {
240 prsp = NULL;
241 }
242
243 /* Allocate buffer for Buffer ptr list */
244 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
245 if (pbuflist)
246 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
247 &pbuflist->phys);
248 if (!pbuflist || !pbuflist->virt)
249 goto els_iocb_free_pbuf_exit;
250
251 INIT_LIST_HEAD(&pbuflist->list);
252
253 if (expect_rsp) {
254 switch (elscmd) {
255 case ELS_CMD_FLOGI:
256 timeout = FF_DEF_RATOV * 2;
257 break;
258 case ELS_CMD_LOGO:
259 timeout = phba->fc_ratov;
260 break;
261 default:
262 timeout = phba->fc_ratov * 2;
263 }
264
265 /* Fill SGE for the num bde count */
266 elsiocb->num_bdes = 2;
267 }
268
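/* Hand the prep routine the command buffer on SLI-4 and the BPL
 * buffer list on earlier SLI revisions.
 */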
269 if (phba->sli_rev == LPFC_SLI_REV4)
270 bmp = pcmd;
271 else
272 bmp = pbuflist;
273
274 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
275 elscmd, timeout, expect_rsp);
276
277 bpl = (struct ulp_bde64_le *)pbuflist->virt;
278 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
279 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
280 bpl->type_size = cpu_to_le32(cmd_size);
281 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
282
283 if (expect_rsp) {
284 bpl++;
285 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
286 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
287 bpl->type_size = cpu_to_le32(FCELSSIZE);
288 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
289 }
290
291 elsiocb->cmd_dmabuf = pcmd;
292 elsiocb->bpl_dmabuf = pbuflist;
293 elsiocb->retry = retry;
294 elsiocb->vport = vport;
295 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
296
297 if (prsp)
298 list_add(&prsp->list, &pcmd->list);
299 if (expect_rsp) {
300 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
302 "0116 Xmit ELS command x%x to remote "
303 "NPORT x%x I/O tag: x%x, port state:x%x "
304 "rpi x%x fc_flag:x%x\n",
305 elscmd, did, elsiocb->iotag,
306 vport->port_state, ndlp->nlp_rpi,
307 vport->fc_flag);
308 } else {
309 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
310 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
311 "0117 Xmit ELS response x%x to remote "
312 "NPORT x%x I/O tag: x%x, size: x%x "
313 "port_state x%x rpi x%x fc_flag x%x\n",
314 elscmd, ndlp->nlp_DID, elsiocb->iotag,
315 cmd_size, vport->port_state,
316 ndlp->nlp_rpi, vport->fc_flag);
317 }
318
319 return elsiocb;
320
321 els_iocb_free_pbuf_exit:
322 if (expect_rsp)
323 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
324 kfree(pbuflist);
325
326 els_iocb_free_prsp_exit:
327 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
328 kfree(prsp);
329
330 els_iocb_free_pcmb_exit:
331 kfree(pcmd);
332 lpfc_sli_release_iocbq(phba, elsiocb);
333 return NULL;
334 }
335
336 /**
337 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
338 * @vport: pointer to a host virtual N_Port data structure.
339 *
340 * This routine issues a fabric registration login for a @vport. An
341 * active ndlp node with Fabric_DID must already exist for this @vport.
342 * The routine invokes two mailbox commands to carry out fabric registration
343 * login through the HBA firmware: the first mailbox command requests the
344 * HBA to perform link configuration for the @vport; and the second mailbox
345 * command requests the HBA to perform the actual fabric registration login
346 * with the @vport.
347 *
348 * Return code
349 * 0 - successfully issued fabric registration login for @vport
350 * -ENXIO -- failed to issue fabric registration login for @vport
351 **/
352 int
353 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
354 {
355 struct lpfc_hba *phba = vport->phba;
356 LPFC_MBOXQ_t *mbox;
357 struct lpfc_nodelist *ndlp;
358 struct serv_parm *sp;
359 int rc;
360 int err = 0;
361
362 sp = &phba->fc_fabparam;
363 ndlp = lpfc_findnode_did(vport, Fabric_DID);
364 if (!ndlp) {
365 err = 1;
366 goto fail;
367 }
368
369 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
370 if (!mbox) {
371 err = 2;
372 goto fail;
373 }
374
375 vport->port_state = LPFC_FABRIC_CFG_LINK;
376 lpfc_config_link(phba, mbox);
377 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
378 mbox->vport = vport;
379
380 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
381 if (rc == MBX_NOT_FINISHED) {
382 err = 3;
383 goto fail_free_mbox;
384 }
385
386 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
387 if (!mbox) {
388 err = 4;
389 goto fail;
390 }
391 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
392 ndlp->nlp_rpi);
393 if (rc) {
394 err = 5;
395 goto fail_free_mbox;
396 }
397
398 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
399 mbox->vport = vport;
400 /* increment the reference count on ndlp to hold reference
401 * for the callback routine.
402 */
403 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
404 if (!mbox->ctx_ndlp) {
405 err = 6;
406 goto fail_free_mbox;
407 }
408
409 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
410 if (rc == MBX_NOT_FINISHED) {
411 err = 7;
412 goto fail_issue_reg_login;
413 }
414
415 return 0;
416
417 fail_issue_reg_login:
418 /* decrement the reference count on ndlp just incremented
419 * for the failed mbox command.
420 */
421 lpfc_nlp_put(ndlp);
422 fail_free_mbox:
423 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
424 fail:
425 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
426 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
427 "0249 Cannot issue Register Fabric login: Err %d\n",
428 err);
429 return -ENXIO;
430 }
431
432 /**
433 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
434 * @vport: pointer to a host virtual N_Port data structure.
435 *
436 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
437 * the @vport. This mailbox command is necessary for SLI4 ports only.
438 *
439 * Return code
440 * 0 - successfully issued REG_VFI for @vport
441 * A failure code otherwise.
442 **/
443 int
444 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
445 {
446 struct lpfc_hba *phba = vport->phba;
447 LPFC_MBOXQ_t *mboxq = NULL;
448 struct lpfc_nodelist *ndlp;
449 struct lpfc_dmabuf *dmabuf = NULL;
450 int rc = 0;
451
452 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
453 if ((phba->sli_rev == LPFC_SLI_REV4) &&
454 !(phba->link_flag & LS_LOOPBACK_MODE) &&
455 !(vport->fc_flag & FC_PT2PT)) {
456 ndlp = lpfc_findnode_did(vport, Fabric_DID);
457 if (!ndlp) {
458 rc = -ENODEV;
459 goto fail;
460 }
461 }
462
463 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
464 if (!mboxq) {
465 rc = -ENOMEM;
466 goto fail;
467 }
468
469 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
470 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
471 rc = lpfc_mbox_rsrc_prep(phba, mboxq);
472 if (rc) {
473 rc = -ENOMEM;
474 goto fail_mbox;
475 }
476 dmabuf = mboxq->ctx_buf;
477 memcpy(dmabuf->virt, &phba->fc_fabparam,
478 sizeof(struct serv_parm));
479 }
480
481 vport->port_state = LPFC_FABRIC_CFG_LINK;
482 if (dmabuf) {
483 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
484 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
485 mboxq->ctx_buf = dmabuf;
486 } else {
487 lpfc_reg_vfi(mboxq, vport, 0);
488 }
489
490 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
491 mboxq->vport = vport;
492 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
493 if (rc == MBX_NOT_FINISHED) {
494 rc = -ENXIO;
495 goto fail_mbox;
496 }
497 return 0;
498
499 fail_mbox:
500 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
501 fail:
502 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
503 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
504 "0289 Issue Register VFI failed: Err %d\n", rc);
505 return rc;
506 }
507
508 /**
509 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
510 * @vport: pointer to a host virtual N_Port data structure.
511 *
512 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
513 * the @vport. This mailbox command is necessary for SLI4 ports only.
514 *
515 * Return code
516 * 0 - successfully issued UNREG_VFI for @vport
517 * A failure code otherwise.
518 **/
519 int
520 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
521 {
522 struct lpfc_hba *phba = vport->phba;
523 struct Scsi_Host *shost;
524 LPFC_MBOXQ_t *mboxq;
525 int rc;
526
527 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
528 if (!mboxq) {
529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
530 "2556 UNREG_VFI mbox allocation failed"
531 "HBA state x%x\n", phba->pport->port_state);
532 return -ENOMEM;
533 }
534
535 lpfc_unreg_vfi(mboxq, vport);
536 mboxq->vport = vport;
537 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
538
539 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
540 if (rc == MBX_NOT_FINISHED) {
541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
542 "2557 UNREG_VFI issue mbox failed rc x%x "
543 "HBA state x%x\n",
544 rc, phba->pport->port_state);
545 mempool_free(mboxq, phba->mbox_mem_pool);
546 return -EIO;
547 }
548
549 shost = lpfc_shost_from_vport(vport);
550 spin_lock_irq(shost->host_lock);
551 vport->fc_flag &= ~FC_VFI_REGISTERED;
552 spin_unlock_irq(shost->host_lock);
553 return 0;
554 }
555
556 /**
557 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
558 * @vport: pointer to a host virtual N_Port data structure.
559 * @sp: pointer to service parameter data structure.
560 *
561 * This routine is called from FLOGI/FDISC completion handler functions.
562 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
563 * Fabric nodename has changed in the completion service parameters; otherwise
564 * it returns 0. This function also sets a flag in the vport data structure to
565 * delay N_Port discovery after the FLOGI/FDISC completion if the Clean Address
566 * bit in the FLOGI/FDISC response is cleared and the FCID, Fabric portname, or
567 * Fabric nodename has changed in the completion service parameters.
568 *
569 * Return code
570 * 0 - FCID, Fabric nodename, and Fabric portname are unchanged.
571 * 1 - FCID, Fabric nodename, or Fabric portname has changed.
572 *
573 **/
574 static uint8_t
575 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
576 struct serv_parm *sp)
577 {
578 struct lpfc_hba *phba = vport->phba;
579 uint8_t fabric_param_changed = 0;
580 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
581
582 if ((vport->fc_prevDID != vport->fc_myDID) ||
583 memcmp(&vport->fabric_portname, &sp->portName,
584 sizeof(struct lpfc_name)) ||
585 memcmp(&vport->fabric_nodename, &sp->nodeName,
586 sizeof(struct lpfc_name)) ||
587 (vport->vport_flag & FAWWPN_PARAM_CHG)) {
588 fabric_param_changed = 1;
589 vport->vport_flag &= ~FAWWPN_PARAM_CHG;
590 }
591 /*
592 * Word 1 Bit 31 in common service parameter is overloaded.
593 * Word 1 Bit 31 in FLOGI request is multiple NPort request
594 * Word 1 Bit 31 in FLOGI response is clean address bit
595 *
596 * If fabric parameter is changed and clean address bit is
597 * cleared delay nport discovery if
598 * - vport->fc_prevDID != 0 (not initial discovery) OR
599 * - lpfc_delay_discovery module parameter is set.
600 */
601 if (fabric_param_changed && !sp->cmn.clean_address_bit &&
602 (vport->fc_prevDID || phba->cfg_delay_discovery)) {
603 spin_lock_irq(shost->host_lock);
604 vport->fc_flag |= FC_DISC_DELAYED;
605 spin_unlock_irq(shost->host_lock);
606 }
607
608 return fabric_param_changed;
609 }
610
611
612 /**
613 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
614 * @vport: pointer to a host virtual N_Port data structure.
615 * @ndlp: pointer to a node-list data structure.
616 * @sp: pointer to service parameter data structure.
617 * @ulp_word4: command response value
618 *
619 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
620 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
621 * port in a fabric topology. It properly sets up the parameters to the @ndlp
622 * from the IOCB response. It also checks the N_Port ID newly assigned to the
623 * @vport against the previously assigned N_Port ID. If it is different from
624 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
625 * is invoked on all the remaining nodes with the @vport to unregister the
626 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
627 * is invoked to register login to the fabric.
628 *
629 * Return code
630 * 0 - Success (currently, always return 0)
631 **/
632 static int
633 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
634 struct serv_parm *sp, uint32_t ulp_word4)
635 {
636 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
637 struct lpfc_hba *phba = vport->phba;
638 struct lpfc_nodelist *np;
639 struct lpfc_nodelist *next_np;
640 uint8_t fabric_param_changed;
641
642 spin_lock_irq(shost->host_lock);
643 vport->fc_flag |= FC_FABRIC;
644 spin_unlock_irq(shost->host_lock);
645
646 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
647 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
648 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
649
650 phba->fc_edtovResol = sp->cmn.edtovResolution;
651 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
652
653 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
654 spin_lock_irq(shost->host_lock);
655 vport->fc_flag |= FC_PUBLIC_LOOP;
656 spin_unlock_irq(shost->host_lock);
657 }
658
659 vport->fc_myDID = ulp_word4 & Mask_DID;
660 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
661 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
662 ndlp->nlp_class_sup = 0;
663 if (sp->cls1.classValid)
664 ndlp->nlp_class_sup |= FC_COS_CLASS1;
665 if (sp->cls2.classValid)
666 ndlp->nlp_class_sup |= FC_COS_CLASS2;
667 if (sp->cls3.classValid)
668 ndlp->nlp_class_sup |= FC_COS_CLASS3;
669 if (sp->cls4.classValid)
670 ndlp->nlp_class_sup |= FC_COS_CLASS4;
671 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
672 sp->cmn.bbRcvSizeLsb;
673
674 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
675 if (fabric_param_changed) {
676 /* Reset FDMI attribute masks based on config parameter */
677 if (phba->cfg_enable_SmartSAN ||
678 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
679 /* Setup appropriate attribute masks */
680 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
681 if (phba->cfg_enable_SmartSAN)
682 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
683 else
684 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
685 } else {
686 vport->fdmi_hba_mask = 0;
687 vport->fdmi_port_mask = 0;
688 }
689
690 }
691 memcpy(&vport->fabric_portname, &sp->portName,
692 sizeof(struct lpfc_name));
693 memcpy(&vport->fabric_nodename, &sp->nodeName,
694 sizeof(struct lpfc_name));
695 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
696
697 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
698 if (sp->cmn.response_multiple_NPort) {
699 lpfc_printf_vlog(vport, KERN_WARNING,
700 LOG_ELS | LOG_VPORT,
701 "1816 FLOGI NPIV supported, "
702 "response data 0x%x\n",
703 sp->cmn.response_multiple_NPort);
704 spin_lock_irq(&phba->hbalock);
705 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
706 spin_unlock_irq(&phba->hbalock);
707 } else {
708 /* Because we asked f/w for NPIV it still expects us
709 to call reg_vnpid at least for the physical host */
710 lpfc_printf_vlog(vport, KERN_WARNING,
711 LOG_ELS | LOG_VPORT,
712 "1817 Fabric does not support NPIV "
713 "- configuring single port mode.\n");
714 spin_lock_irq(&phba->hbalock);
715 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
716 spin_unlock_irq(&phba->hbalock);
717 }
718 }
719
720 /*
721 * For FC we need to do some special processing because of the SLI
722 * Port's default settings of the Common Service Parameters.
723 */
724 if ((phba->sli_rev == LPFC_SLI_REV4) &&
725 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
726 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
727 if (fabric_param_changed)
728 lpfc_unregister_fcf_prep(phba);
729
730 /* This should just update the VFI CSPs*/
731 if (vport->fc_flag & FC_VFI_REGISTERED)
732 lpfc_issue_reg_vfi(vport);
733 }
734
735 if (fabric_param_changed &&
736 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
737
738 /* If our NportID changed, we need to ensure all
739 * remaining NPORTs get unreg_login'ed.
740 */
741 list_for_each_entry_safe(np, next_np,
742 &vport->fc_nodes, nlp_listp) {
743 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
744 !(np->nlp_flag & NLP_NPR_ADISC))
745 continue;
746 spin_lock_irq(&np->lock);
747 np->nlp_flag &= ~NLP_NPR_ADISC;
748 spin_unlock_irq(&np->lock);
749 lpfc_unreg_rpi(vport, np);
750 }
751 lpfc_cleanup_pending_mbox(vport);
752
753 if (phba->sli_rev == LPFC_SLI_REV4) {
754 lpfc_sli4_unreg_all_rpis(vport);
755 lpfc_mbx_unreg_vpi(vport);
756 spin_lock_irq(shost->host_lock);
757 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
758 spin_unlock_irq(shost->host_lock);
759 }
760
761 /*
762 * For SLI3 and SLI4, the VPI needs to be reregistered in
763 * response to this fabric parameter change event.
764 */
765 spin_lock_irq(shost->host_lock);
766 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
767 spin_unlock_irq(shost->host_lock);
768 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
769 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
770 /*
771 * Driver needs to re-reg VPI in order for f/w
772 * to update the MAC address.
773 */
774 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
775 lpfc_register_new_vport(phba, vport, ndlp);
776 return 0;
777 }
778
779 if (phba->sli_rev < LPFC_SLI_REV4) {
780 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
781 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
782 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
783 lpfc_register_new_vport(phba, vport, ndlp);
784 else
785 lpfc_issue_fabric_reglogin(vport);
786 } else {
787 ndlp->nlp_type |= NLP_FABRIC;
788 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
789 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
790 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
791 lpfc_start_fdiscs(phba);
792 lpfc_do_scr_ns_plogi(phba, vport);
793 } else if (vport->fc_flag & FC_VFI_REGISTERED)
794 lpfc_issue_init_vpi(vport);
795 else {
796 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
797 "3135 Need register VFI: (x%x/%x)\n",
798 vport->fc_prevDID, vport->fc_myDID);
799 lpfc_issue_reg_vfi(vport);
800 }
801 }
802 return 0;
803 }
804
805 /**
806 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
807 * @vport: pointer to a host virtual N_Port data structure.
808 * @ndlp: pointer to a node-list data structure.
809 * @sp: pointer to service parameter data structure.
810 *
811 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
812 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
813 * in a point-to-point topology. First, the @vport's N_Port Name is compared
814 * with the received N_Port Name: if the @vport's N_Port Name is greater than
815 * the received N_Port Name lexicographically, this node shall assign local
816 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
817 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
818 * this node shall just wait for the remote node to issue PLOGI and assign
819 * N_Port IDs.
820 *
821 * Return code
822 * 0 - Success
823 * -ENXIO - Fail
824 **/
825 static int
826 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
827 struct serv_parm *sp)
828 {
829 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
830 struct lpfc_hba *phba = vport->phba;
831 LPFC_MBOXQ_t *mbox;
832 int rc;
833
834 spin_lock_irq(shost->host_lock);
835 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
836 vport->fc_flag |= FC_PT2PT;
837 spin_unlock_irq(shost->host_lock);
838
839 /* If we are pt2pt with another NPort, force NPIV off! */
840 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
841
842 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
843 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
844 lpfc_unregister_fcf_prep(phba);
845
846 spin_lock_irq(shost->host_lock);
847 vport->fc_flag &= ~FC_VFI_REGISTERED;
848 spin_unlock_irq(shost->host_lock);
849 phba->fc_topology_changed = 0;
850 }
851
852 rc = memcmp(&vport->fc_portname, &sp->portName,
853 sizeof(vport->fc_portname));
854
855 if (rc >= 0) {
856 /* This side will initiate the PLOGI */
857 spin_lock_irq(shost->host_lock);
858 vport->fc_flag |= FC_PT2PT_PLOGI;
859 spin_unlock_irq(shost->host_lock);
860
861 /*
862 * N_Port ID cannot be 0, set our Id to LocalID
863 * the other side will be RemoteID.
864 */
865
866 /* not equal */
867 if (rc)
868 vport->fc_myDID = PT2PT_LocalID;
869
870 /* If not registered with a transport, decrement ndlp reference
871 * count indicating that ndlp can be safely released when other
872 * references are removed.
873 */
874 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
875 lpfc_nlp_put(ndlp);
876
877 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
878 if (!ndlp) {
879 /*
880 * Cannot find existing Fabric ndlp, so allocate a
881 * new one
882 */
883 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
884 if (!ndlp)
885 goto fail;
886 }
887
888 memcpy(&ndlp->nlp_portname, &sp->portName,
889 sizeof(struct lpfc_name));
890 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
891 sizeof(struct lpfc_name));
892 /* Set state will put ndlp onto node list if not already done */
893 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
894 spin_lock_irq(&ndlp->lock);
895 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
896 spin_unlock_irq(&ndlp->lock);
897
898 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
899 if (!mbox)
900 goto fail;
901
902 lpfc_config_link(phba, mbox);
903
904 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
905 mbox->vport = vport;
906 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
907 if (rc == MBX_NOT_FINISHED) {
908 mempool_free(mbox, phba->mbox_mem_pool);
909 goto fail;
910 }
911 } else {
912 /* This side will wait for the PLOGI. If not registered with
913 * a transport, decrement node reference count indicating that
914 * ndlp can be released when other references are removed.
915 */
916 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
917 lpfc_nlp_put(ndlp);
918
919 /* Start discovery - this should just do CLEAR_LA */
920 lpfc_disc_start(vport);
921 }
922
923 return 0;
924 fail:
925 return -ENXIO;
926 }
927
928 /**
929 * lpfc_cmpl_els_flogi - Completion callback function for flogi
930 * @phba: pointer to lpfc hba data structure.
931 * @cmdiocb: pointer to lpfc command iocb data structure.
932 * @rspiocb: pointer to lpfc response iocb data structure.
933 *
934 * This routine is the top-level completion callback function for issuing
935 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
936 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
937 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
938 * returning 1), the command IOCB will be released and the function returns.
939 * If the retry attempt has been given up (possibly after reaching the maximum
940 * number of retries), one additional decrement of the ndlp reference shall be
941 * made before going out after releasing the command IOCB. This will
942 * actually release the remote node (Note, lpfc_els_free_iocb() will also
943 * invoke one decrement of ndlp reference count). If no error reported in
944 * the IOCB status, the command Port ID field is used to determine whether
945 * this is a point-to-point topology or a fabric topology: if the Port ID
946 * field is assigned, it is a fabric topology; otherwise, it is a
947 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
948 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
949 * specific topology completion conditions.
950 **/
951 static void
952 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
953 struct lpfc_iocbq *rspiocb)
954 {
955 struct lpfc_vport *vport = cmdiocb->vport;
956 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
957 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
958 IOCB_t *irsp;
959 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
960 struct serv_parm *sp;
961 uint16_t fcf_index;
962 int rc;
963 u32 ulp_status, ulp_word4, tmo;
964 bool flogi_in_retry = false;
965
966 /* Check to see if link went down during discovery */
967 if (lpfc_els_chk_latt(vport)) {
968 /* One additional decrement on node reference count to
969 * trigger the release of the node
970 */
971 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
972 lpfc_nlp_put(ndlp);
973 goto out;
974 }
975
976 ulp_status = get_job_ulpstatus(phba, rspiocb);
977 ulp_word4 = get_job_word4(phba, rspiocb);
978
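/* Fetch the command timeout from the WQE on SLI-4 and from the
 * response IOCB's ulpTimeout field on earlier SLI revisions.
 */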
979 if (phba->sli_rev == LPFC_SLI_REV4) {
980 tmo = get_wqe_tmo(cmdiocb);
981 } else {
982 irsp = &rspiocb->iocb;
983 tmo = irsp->ulpTimeout;
984 }
985
986 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
987 "FLOGI cmpl: status:x%x/x%x state:x%x",
988 ulp_status, ulp_word4,
989 vport->port_state);
990
991 if (ulp_status) {
992 /*
993 * In case of FIP mode, perform roundrobin FCF failover
994 * due to new FCF discovery
995 */
996 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
997 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
998 if (phba->link_state < LPFC_LINK_UP)
999 goto stop_rr_fcf_flogi;
1000 if ((phba->fcoe_cvl_eventtag_attn ==
1001 phba->fcoe_cvl_eventtag) &&
1002 (ulp_status == IOSTAT_LOCAL_REJECT) &&
1003 ((ulp_word4 & IOERR_PARAM_MASK) ==
1004 IOERR_SLI_ABORTED))
1005 goto stop_rr_fcf_flogi;
1006 else
1007 phba->fcoe_cvl_eventtag_attn =
1008 phba->fcoe_cvl_eventtag;
1009 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1010 "2611 FLOGI failed on FCF (x%x), "
1011 "status:x%x/x%x, tmo:x%x, perform "
1012 "roundrobin FCF failover\n",
1013 phba->fcf.current_rec.fcf_indx,
1014 ulp_status, ulp_word4, tmo);
1015 lpfc_sli4_set_fcf_flogi_fail(phba,
1016 phba->fcf.current_rec.fcf_indx);
1017 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1018 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1019 if (rc)
1020 goto out;
1021 }
1022
1023 stop_rr_fcf_flogi:
1024 /* FLOGI failure */
1025 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
1026 ((ulp_word4 & IOERR_PARAM_MASK) ==
1027 IOERR_LOOP_OPEN_FAILURE)))
1028 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1029 "2858 FLOGI failure Status:x%x/x%x TMO"
1030 ":x%x Data x%x x%x\n",
1031 ulp_status, ulp_word4, tmo,
1032 phba->hba_flag, phba->fcf.fcf_flag);
1033
1034 /* Check for retry */
1035 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1036 /* Address a timing race with dev_loss. If dev_loss
1037 * is active on this FPort node, put the initial ref
1038 * count back to stop premature node release actions.
1039 */
1040 lpfc_check_nlp_post_devloss(vport, ndlp);
1041 flogi_in_retry = true;
1042 goto out;
1043 }
1044
1045 /* The FLOGI will not be retried. If the FPort node is not
1046 * registered with the SCSI transport, remove the initial
1047 * reference to trigger node release.
1048 */
1049 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
1050 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
1051 lpfc_nlp_put(ndlp);
1052
1053 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1054 "0150 FLOGI failure Status:x%x/x%x "
1055 "xri x%x TMO:x%x refcnt %d\n",
1056 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
1057 tmo, kref_read(&ndlp->kref));
1058
1059 /* If this is not a loop open failure, bail out */
1060 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
1061 ((ulp_word4 & IOERR_PARAM_MASK) ==
1062 IOERR_LOOP_OPEN_FAILURE))) {
1063 /* FLOGI failure */
1064 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1065 "0100 FLOGI failure Status:x%x/x%x "
1066 "TMO:x%x\n",
1067 ulp_status, ulp_word4, tmo);
1068 goto flogifail;
1069 }
1070
1071 /* FLOGI failed, so there is no fabric */
1072 spin_lock_irq(shost->host_lock);
1073 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
1074 FC_PT2PT_NO_NVME);
1075 spin_unlock_irq(shost->host_lock);
1076
1077 /* If private loop, then allow max outstanding els to be
1078 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1079 * alpa map would take too long otherwise.
1080 */
1081 if (phba->alpa_map[0] == 0)
1082 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1083 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1084 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1085 (vport->fc_prevDID != vport->fc_myDID) ||
1086 phba->fc_topology_changed)) {
1087 if (vport->fc_flag & FC_VFI_REGISTERED) {
1088 if (phba->fc_topology_changed) {
1089 lpfc_unregister_fcf_prep(phba);
1090 spin_lock_irq(shost->host_lock);
1091 vport->fc_flag &= ~FC_VFI_REGISTERED;
1092 spin_unlock_irq(shost->host_lock);
1093 phba->fc_topology_changed = 0;
1094 } else {
1095 lpfc_sli4_unreg_all_rpis(vport);
1096 }
1097 }
1098
1099 /* Do not register VFI if the driver aborted FLOGI */
1100 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
1101 lpfc_issue_reg_vfi(vport);
1102
1103 goto out;
1104 }
1105 goto flogifail;
1106 }
1107 spin_lock_irq(shost->host_lock);
1108 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1109 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1110 spin_unlock_irq(shost->host_lock);
1111
1112 /*
1113 * The FLOGI succeeded. Sync the data for the CPU before
1114 * accessing it.
1115 */
1116 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1117 if (!prsp)
1118 goto out;
1119 if (!lpfc_is_els_acc_rsp(prsp))
1120 goto out;
1121 sp = prsp->virt + sizeof(uint32_t);
1122
1123 /* FLOGI completes successfully */
1124 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1125 "0101 FLOGI completes successfully, I/O tag:x%x "
1126 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
1127 cmdiocb->iotag, cmdiocb->sli4_xritag,
1128 ulp_word4, sp->cmn.e_d_tov,
1129 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1130 vport->port_state, vport->fc_flag,
1131 sp->cmn.priority_tagging, kref_read(&ndlp->kref));
1132
1133 /* reinitialize the VMID datastructure before returning */
1134 if (lpfc_is_vmid_enabled(phba))
1135 lpfc_reinit_vmid(vport);
1136 if (sp->cmn.priority_tagging)
1137 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
1138 LPFC_VMID_TYPE_PRIO);
1139
1140 /*
1141 * Address a timing race with dev_loss. If dev_loss is active on
1142 * this FPort node, put the initial ref count back to stop premature
1143 * node release actions.
1144 */
1145 lpfc_check_nlp_post_devloss(vport, ndlp);
1146 if (vport->port_state == LPFC_FLOGI) {
1147 /*
1148 * If Common Service Parameters indicate Nport
1149 * we are point to point, if Fport we are Fabric.
1150 */
1151 if (sp->cmn.fPort)
1152 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
1153 ulp_word4);
1154 else if (!(phba->hba_flag & HBA_FCOE_MODE))
1155 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1156 else {
1157 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1158 "2831 FLOGI response with cleared Fabric "
1159 "bit fcf_index 0x%x "
1160 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1161 "Fabric Name "
1162 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
1163 phba->fcf.current_rec.fcf_indx,
1164 phba->fcf.current_rec.switch_name[0],
1165 phba->fcf.current_rec.switch_name[1],
1166 phba->fcf.current_rec.switch_name[2],
1167 phba->fcf.current_rec.switch_name[3],
1168 phba->fcf.current_rec.switch_name[4],
1169 phba->fcf.current_rec.switch_name[5],
1170 phba->fcf.current_rec.switch_name[6],
1171 phba->fcf.current_rec.switch_name[7],
1172 phba->fcf.current_rec.fabric_name[0],
1173 phba->fcf.current_rec.fabric_name[1],
1174 phba->fcf.current_rec.fabric_name[2],
1175 phba->fcf.current_rec.fabric_name[3],
1176 phba->fcf.current_rec.fabric_name[4],
1177 phba->fcf.current_rec.fabric_name[5],
1178 phba->fcf.current_rec.fabric_name[6],
1179 phba->fcf.current_rec.fabric_name[7]);
1180
1181 lpfc_nlp_put(ndlp);
1182 spin_lock_irq(&phba->hbalock);
1183 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1184 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1185 spin_unlock_irq(&phba->hbalock);
1186 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1187 goto out;
1188 }
1189 if (!rc) {
1190 /* Mark the FCF discovery process done */
1191 if (phba->hba_flag & HBA_FIP_SUPPORT)
1192 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1193 LOG_ELS,
1194 "2769 FLOGI to FCF (x%x) "
1195 "completed successfully\n",
1196 phba->fcf.current_rec.fcf_indx);
1197 spin_lock_irq(&phba->hbalock);
1198 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1199 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1200 spin_unlock_irq(&phba->hbalock);
1201 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1202 goto out;
1203 }
1204 } else if (vport->port_state > LPFC_FLOGI &&
1205 vport->fc_flag & FC_PT2PT) {
1206 /*
1207 * In a p2p topology, it is possible that discovery has
1208 * already progressed, and this completion can be ignored.
1209 * Recheck the indicated topology.
1210 */
1211 if (!sp->cmn.fPort)
1212 goto out;
1213 }
1214
1215 flogifail:
1216 spin_lock_irq(&phba->hbalock);
1217 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1218 spin_unlock_irq(&phba->hbalock);
1219
1220 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
1221 /* FLOGI failed, so just use loop map to make discovery list */
1222 lpfc_disc_list_loopmap(vport);
1223
1224 /* Start discovery */
1225 lpfc_disc_start(vport);
1226 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
1227 (((ulp_word4 & IOERR_PARAM_MASK) !=
1228 IOERR_SLI_ABORTED) &&
1229 ((ulp_word4 & IOERR_PARAM_MASK) !=
1230 IOERR_SLI_DOWN))) &&
1231 (phba->link_state != LPFC_CLEAR_LA)) {
1232 /* If FLOGI failed enable link interrupt. */
1233 lpfc_issue_clear_la(phba, vport);
1234 }
1235 out:
1236 if (!flogi_in_retry)
1237 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
1238
1239 lpfc_els_free_iocb(phba, cmdiocb);
1240 lpfc_nlp_put(ndlp);
1241 }
1242
1243 /**
1244 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1245 * aborted during a link down
1246 * @phba: pointer to lpfc hba data structure.
1247 * @cmdiocb: pointer to lpfc command iocb data structure.
1248 * @rspiocb: pointer to lpfc response iocb data structure.
1249 *
1250 */
1251 static void
1252 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1253 struct lpfc_iocbq *rspiocb)
1254 {
1255 uint32_t *pcmd;
1256 uint32_t cmd;
1257 u32 ulp_status, ulp_word4;
1258
1259 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
1260 cmd = *pcmd;
1261
1262 ulp_status = get_job_ulpstatus(phba, rspiocb);
1263 ulp_word4 = get_job_word4(phba, rspiocb);
1264
1265 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1266 "6445 ELS completes after LINK_DOWN: "
1267 " Status %x/%x cmd x%x flg x%x\n",
1268 ulp_status, ulp_word4, cmd,
1269 cmdiocb->cmd_flag);
1270
1271 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
1272 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
1273 atomic_dec(&phba->fabric_iocb_count);
1274 }
1275 lpfc_els_free_iocb(phba, cmdiocb);
1276 }
1277
1278 /**
1279 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1280 * @vport: pointer to a host virtual N_Port data structure.
1281 * @ndlp: pointer to a node-list data structure.
1282 * @retry: number of retries to the command IOCB.
1283 *
1284 * This routine issues a Fabric Login (FLOGI) Request ELS command
1285 * for a @vport. The initiator service parameters are put into the payload
1286 * of the FLOGI Request IOCB and the top-level callback function pointer
1287 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1288 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1289 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1290 *
1291 * Note that the ndlp reference count will be incremented by 1 for holding the
1292 * ndlp and the reference to ndlp will be stored into the ndlp field of
1293 * the IOCB for the completion callback function to the FLOGI ELS command.
1294 *
1295 * Return code
1296 * 0 - successfully issued flogi iocb for @vport
1297 * 1 - failed to issue flogi iocb for @vport
1298 **/
1299 static int
1300 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1301 uint8_t retry)
1302 {
1303 struct lpfc_hba *phba = vport->phba;
1304 struct serv_parm *sp;
1305 union lpfc_wqe128 *wqe = NULL;
1306 IOCB_t *icmd = NULL;
1307 struct lpfc_iocbq *elsiocb;
1308 struct lpfc_iocbq defer_flogi_acc;
1309 u8 *pcmd, ct;
1310 uint16_t cmdsize;
1311 uint32_t tmo, did;
1312 int rc;
1313
1314 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1315 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1316 ndlp->nlp_DID, ELS_CMD_FLOGI);
1317
1318 if (!elsiocb)
1319 return 1;
1320
1321 wqe = &elsiocb->wqe;
1322 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
1323 icmd = &elsiocb->iocb;
1324
1325 /* For FLOGI request, remainder of payload is service parameters */
1326 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1327 pcmd += sizeof(uint32_t);
1328 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1329 sp = (struct serv_parm *) pcmd;
1330
1331 /* Setup CSPs accordingly for Fabric */
1332 sp->cmn.e_d_tov = 0;
1333 sp->cmn.w2.r_a_tov = 0;
1334 sp->cmn.virtual_fabric_support = 0;
1335 sp->cls1.classValid = 0;
1336 if (sp->cmn.fcphLow < FC_PH3)
1337 sp->cmn.fcphLow = FC_PH3;
1338 if (sp->cmn.fcphHigh < FC_PH3)
1339 sp->cmn.fcphHigh = FC_PH3;
1340
1341 /* Determine if switch supports priority tagging */
1342 if (phba->cfg_vmid_priority_tagging) {
1343 sp->cmn.priority_tagging = 1;
1344 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
1345 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
1346 sizeof(vport->lpfc_vmid_host_uuid))) {
1347 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
1348 sizeof(phba->wwpn));
1349 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
1350 sizeof(phba->wwnn));
1351 }
1352 }
1353
1354 if (phba->sli_rev == LPFC_SLI_REV4) {
1355 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1356 LPFC_SLI_INTF_IF_TYPE_0) {
1357 /* FLOGI needs to be 3 for WQE FCFI */
1358 ct = SLI4_CT_FCFI;
1359 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
1360
1361 /* Set the fcfi to the fcfi we registered with */
1362 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
1363 phba->fcf.fcfi);
1364 }
1365
1366 /* Can't do SLI4 class2 without support sequence coalescing */
1367 sp->cls2.classValid = 0;
1368 sp->cls2.seqDelivery = 0;
1369 } else {
1370 /* Historical, setting sequential-delivery bit for SLI3 */
1371 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1372 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1373 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1374 sp->cmn.request_multiple_Nport = 1;
1375 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1376 icmd->ulpCt_h = 1;
1377 icmd->ulpCt_l = 0;
1378 } else {
1379 sp->cmn.request_multiple_Nport = 0;
1380 }
1381
1382 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1383 icmd->un.elsreq64.myID = 0;
1384 icmd->un.elsreq64.fl = 1;
1385 }
1386 }
1387
1388 tmo = phba->fc_ratov;
1389 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1390 lpfc_set_disctmo(vport);
1391 phba->fc_ratov = tmo;
1392
1393 phba->fc_stat.elsXmitFLOGI++;
1394 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
1395
1396 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1397 "Issue FLOGI: opt:x%x",
1398 phba->sli3_options, 0, 0);
1399
1400 elsiocb->ndlp = lpfc_nlp_get(ndlp);
1401 if (!elsiocb->ndlp) {
1402 lpfc_els_free_iocb(phba, elsiocb);
1403 return 1;
1404 }
1405
1406 /* Avoid race with FLOGI completion and hba_flags. */
1407 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
1408
1409 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1410 if (rc == IOCB_ERROR) {
1411 phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
1412 lpfc_els_free_iocb(phba, elsiocb);
1413 lpfc_nlp_put(ndlp);
1414 return 1;
1415 }
1416
1417 /* Clear external loopback plug detected flag */
1418 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
1419
1420 /* Check for a deferred FLOGI ACC condition */
1421 if (phba->defer_flogi_acc_flag) {
1422 /* lookup ndlp for received FLOGI */
1423 ndlp = lpfc_findnode_did(vport, 0);
1424 if (!ndlp)
1425 return 0;
1426
1427 did = vport->fc_myDID;
1428 vport->fc_myDID = Fabric_DID;
1429
1430 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1431
1432 if (phba->sli_rev == LPFC_SLI_REV4) {
1433 bf_set(wqe_ctxt_tag,
1434 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1435 phba->defer_flogi_acc_rx_id);
1436 bf_set(wqe_rcvoxid,
1437 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1438 phba->defer_flogi_acc_ox_id);
1439 } else {
1440 icmd = &defer_flogi_acc.iocb;
1441 icmd->ulpContext = phba->defer_flogi_acc_rx_id;
1442 icmd->unsli3.rcvsli3.ox_id =
1443 phba->defer_flogi_acc_ox_id;
1444 }
1445
1446 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1447 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1448 " ox_id: x%x, hba_flag x%x\n",
1449 phba->defer_flogi_acc_rx_id,
1450 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1451
1452 /* Send deferred FLOGI ACC */
1453 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1454 ndlp, NULL);
1455
1456 phba->defer_flogi_acc_flag = false;
1457 vport->fc_myDID = did;
1458
1459 /* Decrement ndlp reference count to indicate the node can be
1460 * released when other references are removed.
1461 */
1462 lpfc_nlp_put(ndlp);
1463 }
1464
1465 return 0;
1466 }
1467
1468 /**
1469 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1470 * @phba: pointer to lpfc hba data structure.
1471 *
1472 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1473 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1474 * list and issues an abort IOCB command on each outstanding IOCB that
1475 * contains an active Fabric_DID ndlp. Note that this function only issues
1476 * the abort IOCB command on all the outstanding IOCBs, thus when this
1477 * function returns, it does not guarantee all the IOCBs are actually aborted.
1478 *
1479 * Return code
1480 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1481 **/
1482 int
1483 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1484 {
1485 struct lpfc_sli_ring *pring;
1486 struct lpfc_iocbq *iocb, *next_iocb;
1487 struct lpfc_nodelist *ndlp;
1488 u32 ulp_command;
1489
1490 /* Abort outstanding I/O on NPort <nlp_DID> */
1491 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1492 "0201 Abort outstanding I/O on NPort x%x\n",
1493 Fabric_DID);
1494
1495 pring = lpfc_phba_elsring(phba);
1496 if (unlikely(!pring))
1497 return -EIO;
1498
1499 /*
1500 * Check the txcmplq for an iocb that matches the nport the driver is
1501 * searching for.
1502 */
1503 spin_lock_irq(&phba->hbalock);
1504 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1505 ulp_command = get_job_cmnd(phba, iocb);
1506 if (ulp_command == CMD_ELS_REQUEST64_CR) {
1507 ndlp = iocb->ndlp;
1508 if (ndlp && ndlp->nlp_DID == Fabric_DID) {
1509 if ((phba->pport->fc_flag & FC_PT2PT) &&
1510 !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
1511 iocb->fabric_cmd_cmpl =
1512 lpfc_ignore_els_cmpl;
1513 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
1514 NULL);
1515 }
1516 }
1517 }
1518 /* Make sure HBA is alive */
1519 lpfc_issue_hb_tmo(phba);
1520
1521 spin_unlock_irq(&phba->hbalock);
1522
1523 return 0;
1524 }
1525
1526 /**
1527 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1528 * @vport: pointer to a host virtual N_Port data structure.
1529 *
1530 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1531 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1532 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1533 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1534 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1535 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1536 * @vport.
1537 *
1538 * Return code
1539 * 0 - failed to issue initial flogi for @vport
1540 * 1 - successfully issued initial flogi for @vport
1541 **/
1542 int
1543 lpfc_initial_flogi(struct lpfc_vport *vport)
1544 {
1545 struct lpfc_nodelist *ndlp;
1546
1547 vport->port_state = LPFC_FLOGI;
1548 lpfc_set_disctmo(vport);
1549
1550 /* First look for the Fabric ndlp */
1551 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1552 if (!ndlp) {
1553 /* Cannot find existing Fabric ndlp, so allocate a new one */
1554 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1555 if (!ndlp)
1556 return 0;
1557 /* Set the node type */
1558 ndlp->nlp_type |= NLP_FABRIC;
1559
1560 /* Put ndlp onto node list */
1561 lpfc_enqueue_node(vport, ndlp);
1562 }
1563
1564 /* Reset the Fabric flag, topology change may have happened */
1565 vport->fc_flag &= ~FC_FABRIC;
1566 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1567 /* A node reference should be retained while registered with a
1568 * transport or dev-loss-evt work is pending.
1569 * Otherwise, decrement node reference to trigger release.
1570 */
1571 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1572 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1573 lpfc_nlp_put(ndlp);
1574 return 0;
1575 }
1576 return 1;
1577 }
1578
1579 /**
1580 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1581 * @vport: pointer to a host virtual N_Port data structure.
1582 *
1583 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1584 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1585 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1586 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1587 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1588 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1589 * @vport.
1590 *
1591 * Return code
1592 * 0 - failed to issue initial fdisc for @vport
1593 * 1 - successfully issued initial fdisc for @vport
1594 **/
1595 int
1596 lpfc_initial_fdisc(struct lpfc_vport *vport)
1597 {
1598 struct lpfc_nodelist *ndlp;
1599
1600 /* First look for the Fabric ndlp */
1601 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1602 if (!ndlp) {
1603 /* Cannot find existing Fabric ndlp, so allocate a new one */
1604 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1605 if (!ndlp)
1606 return 0;
1607
1608 /* NPIV is only supported in Fabrics. */
1609 ndlp->nlp_type |= NLP_FABRIC;
1610
1611 /* Put ndlp onto node list */
1612 lpfc_enqueue_node(vport, ndlp);
1613 }
1614
1615 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1616 /* A node reference should be retained while registered with a
1617 * transport or dev-loss-evt work is pending.
1618 * Otherwise, decrement node reference to trigger release.
1619 */
1620 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1621 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1622 lpfc_nlp_put(ndlp);
1623 return 0;
1624 }
1625 return 1;
1626 }
1627
1628 /**
1629 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1630 * @vport: pointer to a host virtual N_Port data structure.
1631 *
1632 * This routine checks whether there are more remaining Port Logins
1633 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1634 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1635 * to issue ELS PLOGIs up to the configured discover threads with the
1636 * @vport (@vport->cfg_discovery_threads). The function also decrements
1637 * the @vport's num_disc_nodes by 1 if it is not already 0.
1638 **/
1639 void
1640 lpfc_more_plogi(struct lpfc_vport *vport)
1641 {
1642 if (vport->num_disc_nodes)
1643 vport->num_disc_nodes--;
1644
1645 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1646 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1647 "0232 Continue discovery with %d PLOGIs to go "
1648 "Data: x%x x%x x%x\n",
1649 vport->num_disc_nodes, vport->fc_plogi_cnt,
1650 vport->fc_flag, vport->port_state);
1651 /* Check to see if there are more PLOGIs to be sent */
1652 if (vport->fc_flag & FC_NLP_MORE)
1653 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1654 lpfc_els_disc_plogi(vport);
1655
1656 return;
1657 }
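
/* A condensed sketch of how the ELS completion paths in this file use
 * lpfc_more_plogi() to throttle discovery (compare lpfc_cmpl_els_plogi()
 * further down; the function name is hypothetical, and locking plus the
 * FC_NDISC_ACTIVE / RSCN bookkeeping are omitted):
 */
#if 0
static void example_plogi_throttle_step(struct lpfc_vport *vport, int disc)
{
	if (disc && vport->num_disc_nodes) {
		/* Release one discovery slot and fan out the next batch */
		lpfc_more_plogi(vport);
		if (vport->num_disc_nodes == 0)
			lpfc_can_disctmo(vport);
	}
}
#endif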
1658
1659 /**
1660 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1661 * @phba: pointer to lpfc hba data structure.
1662 * @prsp: pointer to response IOCB payload.
1663 * @ndlp: pointer to a node-list data structure.
1664 *
1665 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1666 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1667 * The following cases are considered N_Port confirmed:
1668 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and matches
1669 * the WWPN of the N_Port logged into; 3) The @ndlp is not on the vport list but
1670 * does not have a WWPN assigned either. If the WWPN is confirmed, the
1671 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1672 * 1) if there is a node on the vport list other than the @ndlp with the same
1673 * WWPN as the N_Port logged into by the PLOGI, lpfc_unreg_rpi() will be invoked
1674 * on that node to release the RPI associated with the node; 2) if there is
1675 * no node on the vport list with the same WWPN as the N_Port logged into by
1676 * the PLOGI, a new node shall be allocated (or activated). In either case, the
1677 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1678 * be released and the new_ndlp shall be put on to the vport node list and
1679 * its pointer returned as the confirmed node.
1680 *
1681 * Note that before the @ndlp is "released", the keepDID from the non-matching
1682 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1683 * of the @ndlp. This is because releasing the @ndlp actually puts it
1684 * into an inactive state on the vport node list, and the vport node list
1685 * management algorithm does not allow two nodes with the same DID.
1686 *
1687 * Return code
1688 * pointer to the PLOGI N_Port @ndlp
1689 **/
1690 static struct lpfc_nodelist *
1691 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1692 struct lpfc_nodelist *ndlp)
1693 {
1694 struct lpfc_vport *vport = ndlp->vport;
1695 struct lpfc_nodelist *new_ndlp;
1696 struct serv_parm *sp;
1697 uint8_t name[sizeof(struct lpfc_name)];
1698 uint32_t keepDID = 0, keep_nlp_flag = 0;
1699 uint32_t keep_new_nlp_flag = 0;
1700 uint16_t keep_nlp_state;
1701 u32 keep_nlp_fc4_type = 0;
1702 struct lpfc_nvme_rport *keep_nrport = NULL;
1703 unsigned long *active_rrqs_xri_bitmap = NULL;
1704
1705 /* Fabric nodes can have the same WWPN so we don't bother searching
1706 * by WWPN. Just return the ndlp that was given to us.
1707 */
1708 if (ndlp->nlp_type & NLP_FABRIC)
1709 return ndlp;
1710
1711 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1712 memset(name, 0, sizeof(struct lpfc_name));
1713
1714 /* Now we find out if the NPort we are logging into, matches the WWPN
1715 * we have for that ndlp. If not, we have some work to do.
1716 */
1717 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1718
1719 /* return immediately if the WWPN matches ndlp */
1720 if (!new_ndlp || (new_ndlp == ndlp))
1721 return ndlp;
1722
1723 /*
1724 * Unregister from backend if not done yet. Could have been skipped
1725 * due to ADISC
1726 */
1727 lpfc_nlp_unreg_node(vport, new_ndlp);
1728
1729 if (phba->sli_rev == LPFC_SLI_REV4) {
1730 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1731 GFP_KERNEL);
1732 if (active_rrqs_xri_bitmap)
1733 memset(active_rrqs_xri_bitmap, 0,
1734 phba->cfg_rrq_xri_bitmap_sz);
1735 }
1736
1737 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1738 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1739 "new_ndlp x%x x%x x%x\n",
1740 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
1741 (new_ndlp ? new_ndlp->nlp_DID : 0),
1742 (new_ndlp ? new_ndlp->nlp_flag : 0),
1743 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1744
1745 keepDID = new_ndlp->nlp_DID;
1746
1747 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
1748 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
1749 phba->cfg_rrq_xri_bitmap_sz);
1750
1751 /* At this point in this routine, we know new_ndlp will be
1752 * returned. However, any previous GID_FTs that were done
1753 * would have updated nlp_fc4_type in ndlp, so we must ensure
1754 * new_ndlp has the right value.
1755 */
1756 if (vport->fc_flag & FC_FABRIC) {
1757 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1758 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1759 }
1760
1761 lpfc_unreg_rpi(vport, new_ndlp);
1762 new_ndlp->nlp_DID = ndlp->nlp_DID;
1763 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1764 if (phba->sli_rev == LPFC_SLI_REV4)
1765 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1766 ndlp->active_rrqs_xri_bitmap,
1767 phba->cfg_rrq_xri_bitmap_sz);
1768
1769 /* Lock both ndlps */
1770 spin_lock_irq(&ndlp->lock);
1771 spin_lock_irq(&new_ndlp->lock);
1772 keep_new_nlp_flag = new_ndlp->nlp_flag;
1773 keep_nlp_flag = ndlp->nlp_flag;
1774 new_ndlp->nlp_flag = ndlp->nlp_flag;
1775
1776 /* if new_ndlp had NLP_UNREG_INP set, keep it */
1777 if (keep_new_nlp_flag & NLP_UNREG_INP)
1778 new_ndlp->nlp_flag |= NLP_UNREG_INP;
1779 else
1780 new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1781
1782 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1783 if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1784 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1785 else
1786 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1787
1788 /*
1789 * Retain the DROPPED flag. This will take care of the init
1790 * refcount when affecting the state change
1791 */
1792 if (keep_new_nlp_flag & NLP_DROPPED)
1793 new_ndlp->nlp_flag |= NLP_DROPPED;
1794 else
1795 new_ndlp->nlp_flag &= ~NLP_DROPPED;
1796
1797 ndlp->nlp_flag = keep_new_nlp_flag;
1798
1799 /* if ndlp had NLP_UNREG_INP set, keep it */
1800 if (keep_nlp_flag & NLP_UNREG_INP)
1801 ndlp->nlp_flag |= NLP_UNREG_INP;
1802 else
1803 ndlp->nlp_flag &= ~NLP_UNREG_INP;
1804
1805 /* if ndlp had NLP_RPI_REGISTERED set, keep it */
1806 if (keep_nlp_flag & NLP_RPI_REGISTERED)
1807 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1808 else
1809 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1810
1811 /*
1812 * Retain the DROPPED flag. This will take care of the init
1813 * refcount when affecting the state change
1814 */
1815 if (keep_nlp_flag & NLP_DROPPED)
1816 ndlp->nlp_flag |= NLP_DROPPED;
1817 else
1818 ndlp->nlp_flag &= ~NLP_DROPPED;
1819
1820 spin_unlock_irq(&new_ndlp->lock);
1821 spin_unlock_irq(&ndlp->lock);
1822
1823 /* Set nlp_states accordingly */
1824 keep_nlp_state = new_ndlp->nlp_state;
1825 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1826
1827 /* interchange the nvme remoteport structs */
1828 keep_nrport = new_ndlp->nrport;
1829 new_ndlp->nrport = ndlp->nrport;
1830
1831 /* Move this back to NPR state */
1832 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1833 /* The ndlp doesn't have a portname yet, but does have an
1834 * NPort ID. The new_ndlp portname matches the Rport's
1835 * portname. Reinstantiate the new_ndlp and reset the ndlp.
1836 */
1837 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1838 "3179 PLOGI confirm NEW: %x %x\n",
1839 new_ndlp->nlp_DID, keepDID);
1840
1841 /* Two ndlps cannot have the same did on the nodelist.
1842 * The KeepDID and keep_nlp_fc4_type need to be swapped
1843 * because ndlp is inflight with no WWPN.
1844 */
1845 ndlp->nlp_DID = keepDID;
1846 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1847 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1848 if (phba->sli_rev == LPFC_SLI_REV4 &&
1849 active_rrqs_xri_bitmap)
1850 memcpy(ndlp->active_rrqs_xri_bitmap,
1851 active_rrqs_xri_bitmap,
1852 phba->cfg_rrq_xri_bitmap_sz);
1853
1854 } else {
1855 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1856 "3180 PLOGI confirm SWAP: %x %x\n",
1857 new_ndlp->nlp_DID, keepDID);
1858
1859 lpfc_unreg_rpi(vport, ndlp);
1860
1861 /* The ndlp and new_ndlp both have WWPNs but are swapping
1862 * NPort Ids and attributes.
1863 */
1864 ndlp->nlp_DID = keepDID;
1865 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1866
1867 if (phba->sli_rev == LPFC_SLI_REV4 &&
1868 active_rrqs_xri_bitmap)
1869 memcpy(ndlp->active_rrqs_xri_bitmap,
1870 active_rrqs_xri_bitmap,
1871 phba->cfg_rrq_xri_bitmap_sz);
1872
1873 /* Since we are switching over to the new_ndlp,
1874 * reset the old ndlp state
1875 */
1876 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1877 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1878 keep_nlp_state = NLP_STE_NPR_NODE;
1879 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1880 ndlp->nrport = keep_nrport;
1881 }
1882
1883 /*
1884 * If ndlp is not associated with any rport we can drop it here else
1885 * let dev_loss_tmo_callbk trigger DEVICE_RM event
1886 */
1887 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
1888 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
1889
1890 if (phba->sli_rev == LPFC_SLI_REV4 &&
1891 active_rrqs_xri_bitmap)
1892 mempool_free(active_rrqs_xri_bitmap,
1893 phba->active_rrq_pool);
1894
1895 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1896 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1897 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1898 new_ndlp->nlp_fc4_type);
1899
1900 return new_ndlp;
1901 }
1902
1903 /**
1904 * lpfc_end_rscn - Check and handle more rscn for a vport
1905 * @vport: pointer to a host virtual N_Port data structure.
1906 *
1907 * This routine checks whether more Registration State Change
1908 * Notifications (RSCNs) came in while the discovery state machine was in
1909 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1910 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1911 * FC_RSCN_MODE bit will be cleared on the @vport to mark the end of
1912 * RSCN handling.
1913 **/
1914 void
1915 lpfc_end_rscn(struct lpfc_vport *vport)
1916 {
1917 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1918
1919 if (vport->fc_flag & FC_RSCN_MODE) {
1920 /*
1921 * Check to see if more RSCNs came in while we were
1922 * processing this one.
1923 */
1924 if (vport->fc_rscn_id_cnt ||
1925 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1926 lpfc_els_handle_rscn(vport);
1927 else {
1928 spin_lock_irq(shost->host_lock);
1929 vport->fc_flag &= ~FC_RSCN_MODE;
1930 spin_unlock_irq(shost->host_lock);
1931 }
1932 }
1933 }
1934
1935 /**
1936 * lpfc_cmpl_els_rrq - Completion handler for ELS RRQs.
1937 * @phba: pointer to lpfc hba data structure.
1938 * @cmdiocb: pointer to lpfc command iocb data structure.
1939 * @rspiocb: pointer to lpfc response iocb data structure.
1940 *
1941 * This routine will call the clear rrq function to free the rrq and
1942 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1943 * exist then the clear_rrq is still called because the rrq needs to
1944 * be freed.
1945 **/
1946
1947 static void
1948 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1949 struct lpfc_iocbq *rspiocb)
1950 {
1951 struct lpfc_vport *vport = cmdiocb->vport;
1952 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1953 struct lpfc_node_rrq *rrq;
1954 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1955 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1956
1957 /* we pass cmdiocb to state machine which needs rspiocb as well */
1958 rrq = cmdiocb->context_un.rrq;
1959 cmdiocb->rsp_iocb = rspiocb;
1960
1961 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1962 "RRQ cmpl: status:x%x/x%x did:x%x",
1963 ulp_status, ulp_word4,
1964 get_job_els_rsp64_did(phba, cmdiocb));
1965
1966
1967 /* rrq completes to NPort <nlp_DID> */
1968 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1969 "2880 RRQ completes to DID x%x "
1970 "Data: x%x x%x x%x x%x x%x\n",
1971 ndlp->nlp_DID, ulp_status, ulp_word4,
1972 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1973
1974 if (ulp_status) {
1975 /* Check for retry */
1976 /* RRQ failed. Don't print vport-to-vport rjts */
1977 if (ulp_status != IOSTAT_LS_RJT ||
1978 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1979 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1980 (phba)->pport->cfg_log_verbose & LOG_ELS)
1981 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1982 "2881 RRQ failure DID:%06X Status:"
1983 "x%x/x%x\n",
1984 ndlp->nlp_DID, ulp_status,
1985 ulp_word4);
1986 }
1987
1988 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1989 lpfc_els_free_iocb(phba, cmdiocb);
1990 lpfc_nlp_put(ndlp);
1991 return;
1992 }
1993 /**
1994 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1995 * @phba: pointer to lpfc hba data structure.
1996 * @cmdiocb: pointer to lpfc command iocb data structure.
1997 * @rspiocb: pointer to lpfc response iocb data structure.
1998 *
1999 * This routine is the completion callback function for issuing the Port
2000 * Login (PLOGI) command. For PLOGI completion, there must be an active
2001 * ndlp on the vport node list that matches the remote node ID from the
2002 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
2003 * ignored and the command IOCB released. The PLOGI response IOCB status is
2004 * checked for error conditions. If an error status is reported, a PLOGI
2005 * retry shall be attempted by invoking the lpfc_els_retry() routine.
2006 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
2007 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discovery State
2008 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
2009 * there are additional N_Port nodes on the vport that need to perform
2010 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
2011 * PLOGIs.
2012 **/
2013 static void
2014 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2015 struct lpfc_iocbq *rspiocb)
2016 {
2017 struct lpfc_vport *vport = cmdiocb->vport;
2018 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2019 IOCB_t *irsp;
2020 struct lpfc_nodelist *ndlp, *free_ndlp;
2021 struct lpfc_dmabuf *prsp;
2022 int disc;
2023 struct serv_parm *sp = NULL;
2024 u32 ulp_status, ulp_word4, did, iotag;
2025 bool release_node = false;
2026
2027 /* we pass cmdiocb to state machine which needs rspiocb as well */
2028 cmdiocb->rsp_iocb = rspiocb;
2029
2030 ulp_status = get_job_ulpstatus(phba, rspiocb);
2031 ulp_word4 = get_job_word4(phba, rspiocb);
2032 did = get_job_els_rsp64_did(phba, cmdiocb);
2033
2034 if (phba->sli_rev == LPFC_SLI_REV4) {
2035 iotag = get_wqe_reqtag(cmdiocb);
2036 } else {
2037 irsp = &rspiocb->iocb;
2038 iotag = irsp->ulpIoTag;
2039 }
2040
2041 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2042 "PLOGI cmpl: status:x%x/x%x did:x%x",
2043 ulp_status, ulp_word4, did);
2044
2045 ndlp = lpfc_findnode_did(vport, did);
2046 if (!ndlp) {
2047 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2048 "0136 PLOGI completes to NPort x%x "
2049 "with no ndlp. Data: x%x x%x x%x\n",
2050 did, ulp_status, ulp_word4, iotag);
2051 goto out_freeiocb;
2052 }
2053
2054 /* Since ndlp can be freed in the disc state machine, note if this node
2055 * is being used during discovery.
2056 */
2057 spin_lock_irq(&ndlp->lock);
2058 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2059 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2060 spin_unlock_irq(&ndlp->lock);
2061
2062 /* PLOGI completes to NPort <nlp_DID> */
2063 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2064 "0102 PLOGI completes to NPort x%06x "
2065 "IoTag x%x Data: x%x x%x x%x x%x x%x\n",
2066 ndlp->nlp_DID, iotag,
2067 ndlp->nlp_fc4_type,
2068 ulp_status, ulp_word4,
2069 disc, vport->num_disc_nodes);
2070
2071 /* Check to see if link went down during discovery */
2072 if (lpfc_els_chk_latt(vport)) {
2073 spin_lock_irq(&ndlp->lock);
2074 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2075 spin_unlock_irq(&ndlp->lock);
2076 goto out;
2077 }
2078
2079 if (ulp_status) {
2080 /* Check for retry */
2081 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2082 /* ELS command is being retried */
2083 if (disc) {
2084 spin_lock_irq(&ndlp->lock);
2085 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2086 spin_unlock_irq(&ndlp->lock);
2087 }
2088 goto out;
2089 }
2090 /* PLOGI failed. Don't print vport-to-vport rjts */
2091 if (ulp_status != IOSTAT_LS_RJT ||
2092 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
2093 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
2094 (phba)->pport->cfg_log_verbose & LOG_ELS)
2095 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2096 "2753 PLOGI failure DID:%06X "
2097 "Status:x%x/x%x\n",
2098 ndlp->nlp_DID, ulp_status,
2099 ulp_word4);
2100
2101 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2102 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
2103 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2104 NLP_EVT_CMPL_PLOGI);
2105
2106 /* If a PLOGI collision occurred, the node needs to continue
2107 * with the reglogin process.
2108 */
2109 spin_lock_irq(&ndlp->lock);
2110 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
2111 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
2112 spin_unlock_irq(&ndlp->lock);
2113 goto out;
2114 }
2115
2116 /* No PLOGI collision and the node is not registered with the
2117 * scsi or nvme transport. It is no longer an active node. Just
2118 * start the device remove process.
2119 */
2120 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2121 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2122 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2123 release_node = true;
2124 }
2125 spin_unlock_irq(&ndlp->lock);
2126
2127 if (release_node)
2128 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2129 NLP_EVT_DEVICE_RM);
2130 } else {
2131 /* Good status, call state machine */
2132 prsp = list_get_first(&cmdiocb->cmd_dmabuf->list,
2133 struct lpfc_dmabuf, list);
2134 if (!prsp)
2135 goto out;
2136 if (!lpfc_is_els_acc_rsp(prsp))
2137 goto out;
2138 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2139
2140 sp = (struct serv_parm *)((u8 *)prsp->virt +
2141 sizeof(u32));
2142
2143 ndlp->vmid_support = 0;
2144 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
2145 (phba->cfg_vmid_priority_tagging &&
2146 sp->cmn.priority_tagging)) {
2147 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
2148 "4018 app_hdr_support %d tagging %d DID x%x\n",
2149 sp->cmn.app_hdr_support,
2150 sp->cmn.priority_tagging,
2151 ndlp->nlp_DID);
2152 /* if the dest port supports VMID, mark it in ndlp */
2153 ndlp->vmid_support = 1;
2154 }
2155
2156 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2157 NLP_EVT_CMPL_PLOGI);
2158 }
2159
2160 if (disc && vport->num_disc_nodes) {
2161 /* Check to see if there are more PLOGIs to be sent */
2162 lpfc_more_plogi(vport);
2163
2164 if (vport->num_disc_nodes == 0) {
2165 spin_lock_irq(shost->host_lock);
2166 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2167 spin_unlock_irq(shost->host_lock);
2168
2169 lpfc_can_disctmo(vport);
2170 lpfc_end_rscn(vport);
2171 }
2172 }
2173
2174 out:
2175 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2176 "PLOGI Cmpl PUT: did:x%x refcnt %d",
2177 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2178
2179 out_freeiocb:
2180 /* Release the reference on the original I/O request. */
2181 free_ndlp = cmdiocb->ndlp;
2182
2183 lpfc_els_free_iocb(phba, cmdiocb);
2184 lpfc_nlp_put(free_ndlp);
2185 return;
2186 }
2187
2188 /**
2189 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
2190 * @vport: pointer to a host virtual N_Port data structure.
2191 * @did: destination port identifier.
2192 * @retry: number of retries to the command IOCB.
2193 *
2194 * This routine issues a Port Login (PLOGI) command to a remote N_Port
2195 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2196 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2197 * This routine constructs the proper fields of the PLOGI IOCB and invokes
2198 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
2199 *
2200 * Note that the ndlp reference count will be incremented by 1 for holding
2201 * the ndlp and the reference to ndlp will be stored into the ndlp field
2202 * of the IOCB for the completion callback function to the PLOGI ELS command.
2203 *
2204 * Return code
2205 * 0 - Successfully issued a plogi for @vport
2206 * 1 - failed to issue a plogi for @vport
2207 **/
2208 int
2209 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2210 {
2211 struct lpfc_hba *phba = vport->phba;
2212 struct serv_parm *sp;
2213 struct lpfc_nodelist *ndlp;
2214 struct lpfc_iocbq *elsiocb;
2215 uint8_t *pcmd;
2216 uint16_t cmdsize;
2217 int ret;
2218
2219 ndlp = lpfc_findnode_did(vport, did);
2220 if (!ndlp)
2221 return 1;
2222
2223 /* Defer the processing of the issue PLOGI until after the
2224 * outstanding UNREG_RPI mbox command completes, unless we
2225 * are going offline. This logic does not apply for Fabric DIDs
2226 */
2227 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) &&
2228 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2229 !(vport->fc_flag & FC_OFFLINE_MODE)) {
2230 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2231 "4110 Issue PLOGI x%x deferred "
2232 "on NPort x%x rpi x%x flg x%x Data:"
2233 " x%px\n",
2234 ndlp->nlp_defer_did, ndlp->nlp_DID,
2235 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp);
2236
2237 /* We can only defer 1st PLOGI */
2238 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2239 ndlp->nlp_defer_did = did;
2240 return 0;
2241 }
2242
2243 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
2244 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2245 ELS_CMD_PLOGI);
2246 if (!elsiocb)
2247 return 1;
2248
2249 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2250
2251 /* For PLOGI request, remainder of payload is service parameters */
2252 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
2253 pcmd += sizeof(uint32_t);
2254 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2255 sp = (struct serv_parm *) pcmd;
2256
2257 /*
2258 * If we are an N_Port connected to a Fabric, fix up parameters so logins
2259 * to devices on remote loops work.
2260 */
2261 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
2262 sp->cmn.altBbCredit = 1;
2263
2264 if (sp->cmn.fcphLow < FC_PH_4_3)
2265 sp->cmn.fcphLow = FC_PH_4_3;
2266
2267 if (sp->cmn.fcphHigh < FC_PH3)
2268 sp->cmn.fcphHigh = FC_PH3;
2269
2270 sp->cmn.valid_vendor_ver_level = 0;
2271 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2272 sp->cmn.bbRcvSizeMsb &= 0xF;
2273
2274 /* Check if the destination port supports VMID */
2275 ndlp->vmid_support = 0;
2276 if (vport->vmid_priority_tagging)
2277 sp->cmn.priority_tagging = 1;
2278 else if (phba->cfg_vmid_app_header &&
2279 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
2280 sp->cmn.app_hdr_support = 1;
2281
2282 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2283 "Issue PLOGI: did:x%x",
2284 did, 0, 0);
2285
2286 /* If our firmware supports this feature, convey that
2287 * information to the target using the vendor specific field.
2288 */
2289 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2290 sp->cmn.valid_vendor_ver_level = 1;
2291 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2292 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2293 }
2294
2295 phba->fc_stat.elsXmitPLOGI++;
2296 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
2297
2298 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2299 "Issue PLOGI: did:x%x refcnt %d",
2300 did, kref_read(&ndlp->kref), 0);
2301 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2302 if (!elsiocb->ndlp) {
2303 lpfc_els_free_iocb(phba, elsiocb);
2304 return 1;
2305 }
2306
2307 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2308 if (ret) {
2309 lpfc_els_free_iocb(phba, elsiocb);
2310 lpfc_nlp_put(ndlp);
2311 return 1;
2312 }
2313
2314 return 0;
2315 }
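
/* Illustrative usage sketch (the function name is hypothetical; the state
 * handling is simplified relative to the discovery state machine): a PLOGI
 * can only be issued to a DID that already has an ndlp on the vport list,
 * so a caller typically creates and enqueues one first. Return convention:
 * 0 on success, 1 on failure.
 */
#if 0
static int example_plogi_new_nport(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, did);
		if (!ndlp)
			return 1;
		lpfc_enqueue_node(vport, ndlp);
	}

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
	return lpfc_issue_els_plogi(vport, did, 0);
}
#endif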
2316
2317 /**
2318 * lpfc_cmpl_els_prli - Completion callback function for prli
2319 * @phba: pointer to lpfc hba data structure.
2320 * @cmdiocb: pointer to lpfc command iocb data structure.
2321 * @rspiocb: pointer to lpfc response iocb data structure.
2322 *
2323 * This routine is the completion callback function for a Process Login
2324 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2325 * status. If there is error status reported, PRLI retry shall be attempted
2326 * by invoking the lpfc_els_retry() routine. Otherwise, the state
2327 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2328 * ndlp to mark the PRLI completion.
2329 **/
2330 static void
2331 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2332 struct lpfc_iocbq *rspiocb)
2333 {
2334 struct lpfc_vport *vport = cmdiocb->vport;
2335 struct lpfc_nodelist *ndlp;
2336 char *mode;
2337 u32 loglevel;
2338 u32 ulp_status;
2339 u32 ulp_word4;
2340 bool release_node = false;
2341
2342 /* we pass cmdiocb to state machine which needs rspiocb as well */
2343 cmdiocb->rsp_iocb = rspiocb;
2344
2345 ndlp = cmdiocb->ndlp;
2346
2347 ulp_status = get_job_ulpstatus(phba, rspiocb);
2348 ulp_word4 = get_job_word4(phba, rspiocb);
2349
2350 spin_lock_irq(&ndlp->lock);
2351 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2352
2353 /* Driver supports multiple FC4 types. Counters matter. */
2354 vport->fc_prli_sent--;
2355 ndlp->fc4_prli_sent--;
2356 spin_unlock_irq(&ndlp->lock);
2357
2358 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2359 "PRLI cmpl: status:x%x/x%x did:x%x",
2360 ulp_status, ulp_word4,
2361 ndlp->nlp_DID);
2362
2363 /* PRLI completes to NPort <nlp_DID> */
2364 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2365 "0103 PRLI completes to NPort x%06x "
2366 "Data: x%x x%x x%x x%x x%x\n",
2367 ndlp->nlp_DID, ulp_status, ulp_word4,
2368 vport->num_disc_nodes, ndlp->fc4_prli_sent,
2369 ndlp->fc4_xpt_flags);
2370
2371 /* Check to see if link went down during discovery */
2372 if (lpfc_els_chk_latt(vport))
2373 goto out;
2374
2375 if (ulp_status) {
2376 /* Check for retry */
2377 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2378 /* ELS command is being retried */
2379 goto out;
2380 }
2381
2382 /* If we don't send GFT_ID to Fabric, a PRLI error
2383 * could be expected.
2384 */
2385 if ((vport->fc_flag & FC_FABRIC) ||
2386 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
2387 mode = KERN_ERR;
2388 loglevel = LOG_TRACE_EVENT;
2389 } else {
2390 mode = KERN_INFO;
2391 loglevel = LOG_ELS;
2392 }
2393
2394 /* PRLI failed */
2395 lpfc_printf_vlog(vport, mode, loglevel,
2396 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2397 "data: x%x x%x x%x\n",
2398 ndlp->nlp_DID, ulp_status,
2399 ulp_word4, ndlp->nlp_state,
2400 ndlp->fc4_prli_sent, ndlp->nlp_flag);
2401
2402 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2403 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
2404 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2405 NLP_EVT_CMPL_PRLI);
2406
2407 /* The following condition catches an inflight transition
2408 * mismatch typically caused by an RSCN. Skip any
2409 * processing to allow recovery.
2410 */
2411 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
2412 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
2413 (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2414 ndlp->nlp_flag & NLP_DELAY_TMO)) {
2415 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
2416 "2784 PRLI cmpl: Allow Node recovery "
2417 "DID x%06x nstate x%x nflag x%x\n",
2418 ndlp->nlp_DID, ndlp->nlp_state,
2419 ndlp->nlp_flag);
2420 goto out;
2421 }
2422
2423 /*
2424 * For P2P topology, retain the node so that PLOGI can be
2425 * attempted on it again.
2426 */
2427 if (vport->fc_flag & FC_PT2PT)
2428 goto out;
2429
2430 /* As long as this node is not registered with the SCSI
2431 * or NVMe transport and no other PRLIs are outstanding,
2432 * it is no longer an active node. Otherwise devloss
2433 * handles the final cleanup.
2434 */
2435 spin_lock_irq(&ndlp->lock);
2436 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
2437 !ndlp->fc4_prli_sent) {
2438 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2439 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2440 release_node = true;
2441 }
2442 spin_unlock_irq(&ndlp->lock);
2443
2444 if (release_node)
2445 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2446 NLP_EVT_DEVICE_RM);
2447 } else {
2448 /* Good status, call state machine. However, if another
2449 * PRLI is outstanding, don't call the state machine
2450 * because final disposition to Mapped or Unmapped is
2451 * completed there.
2452 */
2453 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2454 NLP_EVT_CMPL_PRLI);
2455 }
2456
2457 out:
2458 lpfc_els_free_iocb(phba, cmdiocb);
2459 lpfc_nlp_put(ndlp);
2460 return;
2461 }
2462
2463 /**
2464 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2465 * @vport: pointer to a host virtual N_Port data structure.
2466 * @ndlp: pointer to a node-list data structure.
2467 * @retry: number of retries to the command IOCB.
2468 *
2469 * This routine issues a Process Login (PRLI) ELS command for the
2470 * @vport. The PRLI service parameters are set up in the payload of the
2471 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2472 * is put to the IOCB completion callback func field before invoking the
2473 * routine lpfc_sli_issue_iocb() to send out PRLI command.
2474 *
2475 * Note that the ndlp reference count will be incremented by 1 for holding the
2476 * ndlp and the reference to ndlp will be stored into the ndlp field of
2477 * the IOCB for the completion callback function to the PRLI ELS command.
2478 *
2479 * Return code
2480 * 0 - successfully issued prli iocb command for @vport
2481 * 1 - failed to issue prli iocb command for @vport
2482 **/
2483 int
2484 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2485 uint8_t retry)
2486 {
2487 int rc = 0;
2488 struct lpfc_hba *phba = vport->phba;
2489 PRLI *npr;
2490 struct lpfc_nvme_prli *npr_nvme;
2491 struct lpfc_iocbq *elsiocb;
2492 uint8_t *pcmd;
2493 uint16_t cmdsize;
2494 u32 local_nlp_type, elscmd;
2495
2496 /*
2497 * If we are in RSCN mode, the FC4 types supported from a
2498 * previous GFT_ID command may not be accurate. So, if we
2499 * are an NVME Initiator, always look for the possibility of
2500 * the remote NPort being an NVME Target.
2501 */
2502 if (phba->sli_rev == LPFC_SLI_REV4 &&
2503 vport->fc_flag & FC_RSCN_MODE &&
2504 vport->nvmei_support)
2505 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2506 local_nlp_type = ndlp->nlp_fc4_type;
2507
2508 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2509 * fields here before any of them can complete.
2510 */
2511 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2512 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2513 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2514 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2515 ndlp->nvme_fb_size = 0;
2516
2517 send_next_prli:
2518 if (local_nlp_type & NLP_FC4_FCP) {
2519 /* Payload is 4 + 16 = 20 x14 bytes. */
2520 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2521 elscmd = ELS_CMD_PRLI;
2522 } else if (local_nlp_type & NLP_FC4_NVME) {
2523 /* Payload is 4 + 20 = 24 x18 bytes. */
2524 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2525 elscmd = ELS_CMD_NVMEPRLI;
2526 } else {
2527 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2528 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2529 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2530 return 1;
2531 }
2532
2533 /* SLI3 ports don't support NVME. If this rport is a strict NVME
2534 * FC4 type, implicitly LOGO.
2535 */
2536 if (phba->sli_rev == LPFC_SLI_REV3 &&
2537 ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2538 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2539 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2540 ndlp->nlp_type);
2541 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2542 return 1;
2543 }
2544
2545 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2546 ndlp->nlp_DID, elscmd);
2547 if (!elsiocb)
2548 return 1;
2549
2550 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2551
2552 /* For PRLI request, remainder of payload is service parameters */
2553 memset(pcmd, 0, cmdsize);
2554
2555 if (local_nlp_type & NLP_FC4_FCP) {
2556 /* Remainder of payload is FCP PRLI parameter page.
2557 * Note: this data structure is defined as
2558 * BE/LE in the structure definition so no
2559 * byte swap call is made.
2560 */
2561 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2562 pcmd += sizeof(uint32_t);
2563 npr = (PRLI *)pcmd;
2564
2565 /*
2566 * If our firmware version is 3.20 or later,
2567 * set the following bits for FC-TAPE support.
2568 */
2569 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2570 npr->ConfmComplAllowed = 1;
2571 npr->Retry = 1;
2572 npr->TaskRetryIdReq = 1;
2573 }
2574 npr->estabImagePair = 1;
2575 npr->readXferRdyDis = 1;
2576 if (vport->cfg_first_burst_size)
2577 npr->writeXferRdyDis = 1;
2578
2579 /* For FCP support */
2580 npr->prliType = PRLI_FCP_TYPE;
2581 npr->initiatorFunc = 1;
2582 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
2583
2584 /* Remove FCP type - processed. */
2585 local_nlp_type &= ~NLP_FC4_FCP;
2586 } else if (local_nlp_type & NLP_FC4_NVME) {
2587 /* Remainder of payload is NVME PRLI parameter page.
2588 * This data structure is the newer definition that
2589 * uses bf macros so a byte swap is required.
2590 */
2591 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2592 pcmd += sizeof(uint32_t);
2593 npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2594 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2595 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
2596 if (phba->nsler) {
2597 bf_set(prli_nsler, npr_nvme, 1);
2598 bf_set(prli_conf, npr_nvme, 1);
2599 }
2600
2601 /* Only initiators request first burst. */
2602 if ((phba->cfg_nvme_enable_fb) &&
2603 !phba->nvmet_support)
2604 bf_set(prli_fba, npr_nvme, 1);
2605
2606 if (phba->nvmet_support) {
2607 bf_set(prli_tgt, npr_nvme, 1);
2608 bf_set(prli_disc, npr_nvme, 1);
2609 } else {
2610 bf_set(prli_init, npr_nvme, 1);
2611 bf_set(prli_conf, npr_nvme, 1);
2612 }
2613
2614 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2615 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2616 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
2617
2618 /* Remove NVME type - processed. */
2619 local_nlp_type &= ~NLP_FC4_NVME;
2620 }
2621
2622 phba->fc_stat.elsXmitPRLI++;
2623 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
2624
2625 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2626 "Issue PRLI: did:x%x refcnt %d",
2627 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2628 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2629 if (!elsiocb->ndlp) {
2630 lpfc_els_free_iocb(phba, elsiocb);
2631 return 1;
2632 }
2633
2634 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2635 if (rc == IOCB_ERROR) {
2636 lpfc_els_free_iocb(phba, elsiocb);
2637 lpfc_nlp_put(ndlp);
2638 return 1;
2639 }
2640
2641 /* The vport counters are used for lpfc_scan_finished, but
2642 * the ndlp is used to track outstanding PRLIs for different
2643 * FC4 types.
2644 */
2645 spin_lock_irq(&ndlp->lock);
2646 ndlp->nlp_flag |= NLP_PRLI_SND;
2647 vport->fc_prli_sent++;
2648 ndlp->fc4_prli_sent++;
2649 spin_unlock_irq(&ndlp->lock);
2650
2651 /* The driver supports 2 FC4 types. Make sure
2652 * a PRLI is issued for all types before exiting.
2653 */
2654 if (phba->sli_rev == LPFC_SLI_REV4 &&
2655 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2656 goto send_next_prli;
2657 else
2658 return 0;
2659 }
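
/* Illustrative sketch (hypothetical helper name; NLP_STE_PRLI_ISSUE is
 * assumed from lpfc_disc.h, and the transitions are normally driven by the
 * discovery state machine): after a successful PLOGI/REG_LOGIN the node is
 * moved to PRLI_ISSUE and a PRLI is sent for each FC4 type recorded in
 * nlp_fc4_type (FCP and/or NVME).
 */
#if 0
static void example_start_prli(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp)
{
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
	if (lpfc_issue_els_prli(vport, ndlp, 0))
		/* Could not issue the PRLI; leave the node unmapped */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
#endif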
2660
2661 /**
2662 * lpfc_rscn_disc - Perform rscn discovery for a vport
2663 * @vport: pointer to a host virtual N_Port data structure.
2664 *
2665 * This routine performs Registration State Change Notification (RSCN)
2666 * discovery for a @vport. If the @vport's node port recovery count is not
2667 * zero, it will invoke lpfc_els_disc_plogi() to perform PLOGI for all
2668 * the nodes that need recovery. If no PLOGIs were needed through
2669 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2670 * invoked to check for and handle any additional RSCNs that came in while
2671 * the current ones were being processed.
2672 **/
2673 static void
2674 lpfc_rscn_disc(struct lpfc_vport *vport)
2675 {
2676 lpfc_can_disctmo(vport);
2677
2678 /* RSCN discovery */
2679 /* go thru NPR nodes and issue ELS PLOGIs */
2680 if (vport->fc_npr_cnt)
2681 if (lpfc_els_disc_plogi(vport))
2682 return;
2683
2684 lpfc_end_rscn(vport);
2685 }
2686
2687 /**
2688 * lpfc_adisc_done - Complete the adisc phase of discovery
2689 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2690 *
2691 * This function is called when the final ADISC is completed during discovery.
2692 * This function handles clearing link attention or issuing reg_vpi depending
2693 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2694 * discovery.
2695 * This function is called with no locks held.
2696 **/
2697 static void
2698 lpfc_adisc_done(struct lpfc_vport *vport)
2699 {
2700 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2701 struct lpfc_hba *phba = vport->phba;
2702
2703 /*
2704 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2705 * and continue discovery.
2706 */
2707 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2708 !(vport->fc_flag & FC_RSCN_MODE) &&
2709 (phba->sli_rev < LPFC_SLI_REV4)) {
2710
2711 /*
2712 * If link is down, clear_la and reg_vpi will be done after
2713 * flogi following a link up event
2714 */
2715 if (!lpfc_is_link_up(phba))
2716 return;
2717
2718 /* The ADISCs are complete. Doesn't matter if they
2719 * succeeded or failed because the ADISC completion
2720 * routine guarantees to call the state machine and
2721 * the RPI is either unregistered (failed ADISC response)
2722 * or the RPI is still valid and the node is marked
2723 * mapped for a target. The exchanges should be in the
2724 * correct state. This code is specific to SLI3.
2725 */
2726 lpfc_issue_clear_la(phba, vport);
2727 lpfc_issue_reg_vpi(phba, vport);
2728 return;
2729 }
2730 /*
2731 * For SLI2, we need to set port_state to READY
2732 * and continue discovery.
2733 */
2734 if (vport->port_state < LPFC_VPORT_READY) {
2735 /* If we get here, there is nothing to ADISC */
2736 lpfc_issue_clear_la(phba, vport);
2737 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2738 vport->num_disc_nodes = 0;
2739 /* go thru NPR list, issue ELS PLOGIs */
2740 if (vport->fc_npr_cnt)
2741 lpfc_els_disc_plogi(vport);
2742 if (!vport->num_disc_nodes) {
2743 spin_lock_irq(shost->host_lock);
2744 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2745 spin_unlock_irq(shost->host_lock);
2746 lpfc_can_disctmo(vport);
2747 lpfc_end_rscn(vport);
2748 }
2749 }
2750 vport->port_state = LPFC_VPORT_READY;
2751 } else
2752 lpfc_rscn_disc(vport);
2753 }
2754
2755 /**
2756 * lpfc_more_adisc - Issue more adisc as needed
2757 * @vport: pointer to a host virtual N_Port data structure.
2758 *
2759 * This routine determines whether there are more ndlps on the @vport's
2760 * node list that need to have Address Discover (ADISC) issued. If so, it will
2761 * invoke the lpfc_els_disc_adisc() routine to issue ADISC to the @vport's
2762 * remaining nodes that still need ADISC sent.
2763 **/
2764 void
2765 lpfc_more_adisc(struct lpfc_vport *vport)
2766 {
2767 if (vport->num_disc_nodes)
2768 vport->num_disc_nodes--;
2769 /* Continue discovery with <num_disc_nodes> ADISCs to go */
2770 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2771 "0210 Continue discovery with %d ADISCs to go "
2772 "Data: x%x x%x x%x\n",
2773 vport->num_disc_nodes, vport->fc_adisc_cnt,
2774 vport->fc_flag, vport->port_state);
2775 /* Check to see if there are more ADISCs to be sent */
2776 if (vport->fc_flag & FC_NLP_MORE) {
2777 lpfc_set_disctmo(vport);
2778 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2779 lpfc_els_disc_adisc(vport);
2780 }
2781 if (!vport->num_disc_nodes)
2782 lpfc_adisc_done(vport);
2783 return;
2784 }
2785
2786 /**
2787 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2788 * @phba: pointer to lpfc hba data structure.
2789 * @cmdiocb: pointer to lpfc command iocb data structure.
2790 * @rspiocb: pointer to lpfc response iocb data structure.
2791 *
2792 * This routine is the completion function for issuing the Address Discover
2793 * (ADISC) command. It first checks to see whether link went down during
2794 * the discovery process. If so, the node will be marked for node port
2795 * recovery so that discovery IOCBs can be issued by the link attention
2796 * handler, and the routine exits. Otherwise, the response status is checked.
2797 * If an error was reported in the response status, the ADISC command shall
2798 * be retried by invoking the lpfc_els_retry() routine. Otherwise, the state
2799 * machine is invoked to transition the node with respect to the
2800 * NLP_EVT_CMPL_ADISC event.
2801 **/
2802 static void
2803 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2804 struct lpfc_iocbq *rspiocb)
2805 {
2806 struct lpfc_vport *vport = cmdiocb->vport;
2807 IOCB_t *irsp;
2808 struct lpfc_nodelist *ndlp;
2809 int disc;
2810 u32 ulp_status, ulp_word4, tmo, iotag;
2811 bool release_node = false;
2812
2813 /* we pass cmdiocb to state machine which needs rspiocb as well */
2814 cmdiocb->rsp_iocb = rspiocb;
2815
2816 ndlp = cmdiocb->ndlp;
2817
2818 ulp_status = get_job_ulpstatus(phba, rspiocb);
2819 ulp_word4 = get_job_word4(phba, rspiocb);
2820
2821 if (phba->sli_rev == LPFC_SLI_REV4) {
2822 tmo = get_wqe_tmo(cmdiocb);
2823 iotag = get_wqe_reqtag(cmdiocb);
2824 } else {
2825 irsp = &rspiocb->iocb;
2826 tmo = irsp->ulpTimeout;
2827 iotag = irsp->ulpIoTag;
2828 }
2829
2830 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2831 "ADISC cmpl: status:x%x/x%x did:x%x",
2832 ulp_status, ulp_word4,
2833 ndlp->nlp_DID);
2834
2835 /* Since ndlp can be freed in the disc state machine, note if this node
2836 * is being used during discovery.
2837 */
2838 spin_lock_irq(&ndlp->lock);
2839 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2840 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2841 spin_unlock_irq(&ndlp->lock);
2842 /* ADISC completes to NPort <nlp_DID> */
2843 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2844 "0104 ADISC completes to NPort x%x "
2845 "IoTag x%x Data: x%x x%x x%x x%x x%x\n",
2846 ndlp->nlp_DID, iotag,
2847 ulp_status, ulp_word4,
2848 tmo, disc, vport->num_disc_nodes);
2849
2850 /* Check to see if link went down during discovery */
2851 if (lpfc_els_chk_latt(vport)) {
2852 spin_lock_irq(&ndlp->lock);
2853 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2854 spin_unlock_irq(&ndlp->lock);
2855 goto out;
2856 }
2857
2858 if (ulp_status) {
2859 /* Check for retry */
2860 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2861 /* ELS command is being retried */
2862 if (disc) {
2863 spin_lock_irq(&ndlp->lock);
2864 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2865 spin_unlock_irq(&ndlp->lock);
2866 lpfc_set_disctmo(vport);
2867 }
2868 goto out;
2869 }
2870 /* ADISC failed */
2871 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2872 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2873 ndlp->nlp_DID, ulp_status,
2874 ulp_word4);
2875 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2876 NLP_EVT_CMPL_ADISC);
2877
2878 /* As long as this node is not registered with the SCSI or NVMe
2879 * transport, it is no longer an active node. Otherwise
2880 * devloss handles the final cleanup.
2881 */
2882 spin_lock_irq(&ndlp->lock);
2883 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2884 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2885 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2886 release_node = true;
2887 }
2888 spin_unlock_irq(&ndlp->lock);
2889
2890 if (release_node)
2891 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2892 NLP_EVT_DEVICE_RM);
2893 } else
2894 /* Good status, call state machine */
2895 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2896 NLP_EVT_CMPL_ADISC);
2897
2898 /* Check to see if there are more ADISCs to be sent */
2899 if (disc && vport->num_disc_nodes)
2900 lpfc_more_adisc(vport);
2901 out:
2902 lpfc_els_free_iocb(phba, cmdiocb);
2903 lpfc_nlp_put(ndlp);
2904 return;
2905 }
2906
2907 /**
2908 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2909 * @vport: pointer to a virtual N_Port data structure.
2910 * @ndlp: pointer to a node-list data structure.
2911 * @retry: number of retries to the command IOCB.
2912 *
2913 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2914 * @vport. It prepares the payload of the ADISC ELS command, updates the
2915 * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2916 * to issue the ADISC ELS command.
2917 *
2918 * Note that the ndlp reference count will be incremented by 1 for holding the
2919 * ndlp and the reference to ndlp will be stored into the ndlp field of
2920 * the IOCB for the completion callback function to the ADISC ELS command.
2921 *
2922 * Return code
2923 * 0 - successfully issued adisc
2924 * 1 - failed to issue adisc
2925 **/
2926 int
2927 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2928 uint8_t retry)
2929 {
2930 int rc = 0;
2931 struct lpfc_hba *phba = vport->phba;
2932 ADISC *ap;
2933 struct lpfc_iocbq *elsiocb;
2934 uint8_t *pcmd;
2935 uint16_t cmdsize;
2936
2937 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2938 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2939 ndlp->nlp_DID, ELS_CMD_ADISC);
2940 if (!elsiocb)
2941 return 1;
2942
2943 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2944
2945 /* For ADISC request, remainder of payload is service parameters */
2946 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2947 pcmd += sizeof(uint32_t);
2948
2949 /* Fill in ADISC payload */
2950 ap = (ADISC *) pcmd;
2951 ap->hardAL_PA = phba->fc_pref_ALPA;
2952 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2953 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2954 ap->DID = be32_to_cpu(vport->fc_myDID);
2955
2956 phba->fc_stat.elsXmitADISC++;
2957 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2958 spin_lock_irq(&ndlp->lock);
2959 ndlp->nlp_flag |= NLP_ADISC_SND;
2960 spin_unlock_irq(&ndlp->lock);
2961 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2962 if (!elsiocb->ndlp) {
2963 lpfc_els_free_iocb(phba, elsiocb);
2964 goto err;
2965 }
2966
2967 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2968 "Issue ADISC: did:x%x refcnt %d",
2969 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2970
2971 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2972 if (rc == IOCB_ERROR) {
2973 lpfc_els_free_iocb(phba, elsiocb);
2974 lpfc_nlp_put(ndlp);
2975 goto err;
2976 }
2977
2978 return 0;
2979
2980 err:
2981 spin_lock_irq(&ndlp->lock);
2982 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2983 spin_unlock_irq(&ndlp->lock);
2984 return 1;
2985 }
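
/* Illustrative recovery sketch (hypothetical helper name; NLP_NPR_ADISC and
 * NLP_STE_ADISC_ISSUE are assumed from lpfc_disc.h): during RSCN or link-up
 * recovery, a node that still holds a registered RPI and has NLP_NPR_ADISC
 * set can be revalidated with a lightweight ADISC instead of a full PLOGI.
 */
#if 0
static void example_recover_node(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp)
{
	ndlp->nlp_prev_state = ndlp->nlp_state;
	if (ndlp->nlp_flag & NLP_NPR_ADISC) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
		lpfc_issue_els_adisc(vport, ndlp, 0);
	} else {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	}
}
#endif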
2986
2987 /**
2988 * lpfc_cmpl_els_logo - Completion callback function for logo
2989 * @phba: pointer to lpfc hba data structure.
2990 * @cmdiocb: pointer to lpfc command iocb data structure.
2991 * @rspiocb: pointer to lpfc response iocb data structure.
2992 *
2993 * This routine is the completion function for issuing the ELS Logout (LOGO)
2994 * command. If no error status was reported from the LOGO response, the
2995 * state machine of the associated ndlp shall be invoked for transition with
2996 * respect to NLP_EVT_CMPL_LOGO event.
2997 **/
2998 static void
2999 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3000 struct lpfc_iocbq *rspiocb)
3001 {
3002 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
3003 struct lpfc_vport *vport = ndlp->vport;
3004 IOCB_t *irsp;
3005 unsigned long flags;
3006 uint32_t skip_recovery = 0;
3007 int wake_up_waiter = 0;
3008 u32 ulp_status;
3009 u32 ulp_word4;
3010 u32 tmo, iotag;
3011
3012 /* we pass cmdiocb to state machine which needs rspiocb as well */
3013 cmdiocb->rsp_iocb = rspiocb;
3014
3015 ulp_status = get_job_ulpstatus(phba, rspiocb);
3016 ulp_word4 = get_job_word4(phba, rspiocb);
3017
3018 if (phba->sli_rev == LPFC_SLI_REV4) {
3019 tmo = get_wqe_tmo(cmdiocb);
3020 iotag = get_wqe_reqtag(cmdiocb);
3021 } else {
3022 irsp = &rspiocb->iocb;
3023 tmo = irsp->ulpTimeout;
3024 iotag = irsp->ulpIoTag;
3025 }
3026
3027 spin_lock_irq(&ndlp->lock);
3028 ndlp->nlp_flag &= ~NLP_LOGO_SND;
3029 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
3030 wake_up_waiter = 1;
3031 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
3032 }
3033 spin_unlock_irq(&ndlp->lock);
3034
3035 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3036 "LOGO cmpl: status:x%x/x%x did:x%x",
3037 ulp_status, ulp_word4,
3038 ndlp->nlp_DID);
3039
3040 /* LOGO completes to NPort <nlp_DID> */
3041 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3042 "0105 LOGO completes to NPort x%x "
3043 "IoTag x%x refcnt %d nflags x%x xflags x%x "
3044 "Data: x%x x%x x%x x%x\n",
3045 ndlp->nlp_DID, iotag,
3046 kref_read(&ndlp->kref), ndlp->nlp_flag,
3047 ndlp->fc4_xpt_flags, ulp_status, ulp_word4,
3048 tmo, vport->num_disc_nodes);
3049
3050 if (lpfc_els_chk_latt(vport)) {
3051 skip_recovery = 1;
3052 goto out;
3053 }
3054
3055 /* The LOGO will not be retried on failure. A LOGO was
3056 * issued to the remote rport, and an ACC, an RJT, or no answer are
3057 * all acceptable. Note the failure and move forward with
3058 * discovery. The PLOGI will retry.
3059 */
3060 if (ulp_status) {
3061 /* LOGO failed */
3062 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3063 "2756 LOGO failure, No Retry DID:%06X "
3064 "Status:x%x/x%x\n",
3065 ndlp->nlp_DID, ulp_status,
3066 ulp_word4);
3067
3068 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
3069 skip_recovery = 1;
3070 }
3071
3072 /* Call state machine. This will unregister the rpi if needed. */
3073 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
3074
3075 if (skip_recovery)
3076 goto out;
3077
3078 /* The driver sets this flag for an NPIV instance that doesn't want to
3079 * log into the remote port.
3080 */
3081 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
3082 spin_lock_irq(&ndlp->lock);
3083 if (phba->sli_rev == LPFC_SLI_REV4)
3084 ndlp->nlp_flag |= NLP_RELEASE_RPI;
3085 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3086 spin_unlock_irq(&ndlp->lock);
3087 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3088 NLP_EVT_DEVICE_RM);
3089 goto out_rsrc_free;
3090 }
3091
3092 out:
3093 /* At this point, the LOGO processing is complete. NOTE: For a
3094 * pt2pt topology, we are assuming the NPortID will only change
3095 * on link up processing. For a LOGO / PLOGI initiated by the
3096 * Initiator, we are assuming the NPortID is not going to change.
3097 */
3098
3099 if (wake_up_waiter && ndlp->logo_waitq)
3100 wake_up(ndlp->logo_waitq);
3101 /*
3102 * If the node is a target, the handling attempts to recover the port.
3103 * For any other port type, the rpi is unregistered as an implicit
3104 * LOGO.
3105 */
3106 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3107 skip_recovery == 0) {
3108 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3109 spin_lock_irqsave(&ndlp->lock, flags);
3110 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3111 spin_unlock_irqrestore(&ndlp->lock, flags);
3112
3113 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3114 "3187 LOGO completes to NPort x%x: Start "
3115 "Recovery Data: x%x x%x x%x x%x\n",
3116 ndlp->nlp_DID, ulp_status,
3117 ulp_word4, tmo,
3118 vport->num_disc_nodes);
3119
3120 lpfc_els_free_iocb(phba, cmdiocb);
3121 lpfc_nlp_put(ndlp);
3122
3123 lpfc_disc_start(vport);
3124 return;
3125 }
3126
3127 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3128 * driver sends a LOGO to the rport to clean up. For fabric and
3129 * initiator ports, clean up the node as long as the node is not
3130 * registered with the transport.
3131 */
3132 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3133 spin_lock_irq(&ndlp->lock);
3134 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3135 spin_unlock_irq(&ndlp->lock);
3136 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3137 NLP_EVT_DEVICE_RM);
3138 }
3139 out_rsrc_free:
3140 /* Driver is done with the I/O. */
3141 lpfc_els_free_iocb(phba, cmdiocb);
3142 lpfc_nlp_put(ndlp);
3143 }
3144
3145 /**
3146 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3147 * @vport: pointer to a virtual N_Port data structure.
3148 * @ndlp: pointer to a node-list data structure.
3149 * @retry: number of retries to the command IOCB.
3150 *
3151 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3152 * to a remote node, referred by an @ndlp on a @vport. It constructs the
3153 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3154 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3155 *
3156 * Note that the ndlp reference count will be incremented by 1 for holding the
3157 * ndlp and the reference to ndlp will be stored into the ndlp field of
3158 * the IOCB for the completion callback function to the LOGO ELS command.
3159 *
3160 * Callers of this routine are expected to unregister the RPI first
3161 *
3162 * Return code
3163 * 0 - successfully issued logo
3164 * 1 - failed to issue logo
3165 **/
3166 int
3167 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3168 uint8_t retry)
3169 {
3170 struct lpfc_hba *phba = vport->phba;
3171 struct lpfc_iocbq *elsiocb;
3172 uint8_t *pcmd;
3173 uint16_t cmdsize;
3174 int rc;
3175
3176 spin_lock_irq(&ndlp->lock);
3177 if (ndlp->nlp_flag & NLP_LOGO_SND) {
3178 spin_unlock_irq(&ndlp->lock);
3179 return 0;
3180 }
3181 spin_unlock_irq(&ndlp->lock);
3182
3183 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
3184 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3185 ndlp->nlp_DID, ELS_CMD_LOGO);
3186 if (!elsiocb)
3187 return 1;
3188
3189 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3190 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
3191 pcmd += sizeof(uint32_t);
3192
3193 /* Fill in LOGO payload */
3194 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
3195 pcmd += sizeof(uint32_t);
3196 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
3197
3198 phba->fc_stat.elsXmitLOGO++;
3199 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
3200 spin_lock_irq(&ndlp->lock);
3201 ndlp->nlp_flag |= NLP_LOGO_SND;
3202 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
3203 spin_unlock_irq(&ndlp->lock);
3204 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3205 if (!elsiocb->ndlp) {
3206 lpfc_els_free_iocb(phba, elsiocb);
3207 goto err;
3208 }
3209
3210 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3211 "Issue LOGO: did:x%x refcnt %d",
3212 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3213
3214 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3215 if (rc == IOCB_ERROR) {
3216 lpfc_els_free_iocb(phba, elsiocb);
3217 lpfc_nlp_put(ndlp);
3218 goto err;
3219 }
3220
3221 spin_lock_irq(&ndlp->lock);
3222 ndlp->nlp_prev_state = ndlp->nlp_state;
3223 spin_unlock_irq(&ndlp->lock);
3224 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3225 return 0;
3226
3227 err:
3228 spin_lock_irq(&ndlp->lock);
3229 ndlp->nlp_flag &= ~NLP_LOGO_SND;
3230 spin_unlock_irq(&ndlp->lock);
3231 return 1;
3232 }
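
/* Illustrative sketch of the synchronous LOGO pattern that
 * lpfc_cmpl_els_logo() supports through NLP_WAIT_FOR_LOGO and logo_waitq
 * (hypothetical helper name; wait-queue helpers are assumed from
 * <linux/wait.h>, and the real callers add reference counting and more
 * thorough error handling):
 */
#if 0
static void example_logo_and_wait(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	spin_lock_irq(&ndlp->lock);
	ndlp->logo_waitq = &waitq;
	ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
	spin_unlock_irq(&ndlp->lock);

	/* The completion handler clears NLP_WAIT_FOR_LOGO and wakes us up */
	if (!lpfc_issue_els_logo(vport, ndlp, 0))
		wait_event_timeout(waitq,
				   !(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
				   msecs_to_jiffies(30000));

	spin_lock_irq(&ndlp->lock);
	ndlp->logo_waitq = NULL;
	ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
	spin_unlock_irq(&ndlp->lock);
}
#endif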
3233
3234 /**
3235 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
3236 * @phba: pointer to lpfc hba data structure.
3237 * @cmdiocb: pointer to lpfc command iocb data structure.
3238 * @rspiocb: pointer to lpfc response iocb data structure.
3239 *
3240 * This routine is a generic completion callback function for ELS commands.
3241 * Specifically, it is the callback function which does not need to perform
3242 * any command specific operations. It is currently used by the ELS command
3243 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
3244 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
3245 * Other than certain debug loggings, this callback function simply invokes the
3246 * lpfc_els_chk_latt() routine to check whether link went down during the
3247 * discovery process.
3248 **/
3249 static void
3250 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3251 struct lpfc_iocbq *rspiocb)
3252 {
3253 struct lpfc_vport *vport = cmdiocb->vport;
3254 struct lpfc_nodelist *free_ndlp;
3255 IOCB_t *irsp;
3256 u32 ulp_status, ulp_word4, tmo, did, iotag;
3257
3258 ulp_status = get_job_ulpstatus(phba, rspiocb);
3259 ulp_word4 = get_job_word4(phba, rspiocb);
3260 did = get_job_els_rsp64_did(phba, cmdiocb);
3261
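/* On SLI-4 the timeout and request tag are carried in the command WQE;
 * on SLI-3 they are read from the response IOCB instead.
 */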
3262 if (phba->sli_rev == LPFC_SLI_REV4) {
3263 tmo = get_wqe_tmo(cmdiocb);
3264 iotag = get_wqe_reqtag(cmdiocb);
3265 } else {
3266 irsp = &rspiocb->iocb;
3267 tmo = irsp->ulpTimeout;
3268 iotag = irsp->ulpIoTag;
3269 }
3270
3271 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3272 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3273 ulp_status, ulp_word4, did);
3274
3275 /* ELS cmd tag <ulpIoTag> completes */
3276 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3277 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
3278 iotag, ulp_status, ulp_word4, tmo);
3279
3280 /* Check to see if link went down during discovery */
3281 lpfc_els_chk_latt(vport);
3282
3283 free_ndlp = cmdiocb->ndlp;
3284
3285 lpfc_els_free_iocb(phba, cmdiocb);
3286 lpfc_nlp_put(free_ndlp);
3287 }
3288
3289 /**
3290 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
3291 * @vport: pointer to lpfc_vport data structure.
3292 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
3293 *
3294 * This routine registers the rpi assigned to the fabric controller
3295 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
3296 * state triggering a registration with the SCSI transport.
3297 *
3298 * This routine is singled out because the fabric controller node
3299 * does not receive a PLOGI. It is used by the
3300 * SCR and RDF ELS commands. Callers are expected to qualify
3301 * with SLI4 first.
3302 **/
3303 static int
3304 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
3305 {
3306 int rc = 0;
3307 struct lpfc_hba *phba = vport->phba;
3308 struct lpfc_nodelist *ns_ndlp;
3309 LPFC_MBOXQ_t *mbox;
3310
3311 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
3312 return rc;
3313
3314 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
3315 if (!ns_ndlp)
3316 return -ENODEV;
3317
3318 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3319 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
3320 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
3321 ns_ndlp->nlp_state);
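/* Registration of the fabric controller RPI is deferred until the
 * NameServer node has completed its own login (UNMAPPED state).
 */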
3322 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
3323 return -ENODEV;
3324
3325 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3326 if (!mbox) {
3327 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3328 "0936 %s: no memory for reg_login "
3329 "Data: x%x x%x x%x x%x\n", __func__,
3330 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3331 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3332 return -ENOMEM;
3333 }
3334 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
3335 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
3336 if (rc) {
3337 rc = -EACCES;
3338 goto out;
3339 }
3340
3341 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
3342 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
3343 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
3344 if (!mbox->ctx_ndlp) {
3345 rc = -ENOMEM;
3346 goto out;
3347 }
3348
3349 mbox->vport = vport;
3350 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3351 if (rc == MBX_NOT_FINISHED) {
3352 rc = -ENODEV;
3353 lpfc_nlp_put(fc_ndlp);
3354 goto out;
3355 }
3356 /* Success path. Exit. */
3357 lpfc_nlp_set_state(vport, fc_ndlp,
3358 NLP_STE_REG_LOGIN_ISSUE);
3359 return 0;
3360
3361 out:
3362 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
3363 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3364 "0938 %s: failed to format reg_login "
3365 "Data: x%x x%x x%x x%x\n", __func__,
3366 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3367 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3368 return rc;
3369 }
3370
3371 /**
3372 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
3373 * @phba: pointer to lpfc hba data structure.
3374 * @cmdiocb: pointer to lpfc command iocb data structure.
3375 * @rspiocb: pointer to lpfc response iocb data structure.
3376 *
3377 * This routine is a generic completion callback function for Discovery ELS cmd.
3378 * Currently used by the ELS command issuing routines for the ELS State Change
3379 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
3380 * These commands will be retried once only for ELS timeout errors.
3381 **/
3382 static void
3383 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3384 struct lpfc_iocbq *rspiocb)
3385 {
3386 struct lpfc_vport *vport = cmdiocb->vport;
3387 IOCB_t *irsp;
3388 struct lpfc_els_rdf_rsp *prdf;
3389 struct lpfc_dmabuf *pcmd, *prsp;
3390 u32 *pdata;
3391 u32 cmd;
3392 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
3393 u32 ulp_status, ulp_word4, tmo, did, iotag;
3394
3395 ulp_status = get_job_ulpstatus(phba, rspiocb);
3396 ulp_word4 = get_job_word4(phba, rspiocb);
3397 did = get_job_els_rsp64_did(phba, cmdiocb);
3398
3399 if (phba->sli_rev == LPFC_SLI_REV4) {
3400 tmo = get_wqe_tmo(cmdiocb);
3401 iotag = get_wqe_reqtag(cmdiocb);
3402 } else {
3403 irsp = &rspiocb->iocb;
3404 tmo = irsp->ulpTimeout;
3405 iotag = irsp->ulpIoTag;
3406 }
3407
3408 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3409 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3410 ulp_status, ulp_word4, did);
3411
3412 /* ELS cmd tag <ulpIoTag> completes */
3413 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3414 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
3415 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
3416
3417 pcmd = cmdiocb->cmd_dmabuf;
3418 if (!pcmd)
3419 goto out;
3420
3421 pdata = (u32 *)pcmd->virt;
3422 if (!pdata)
3423 goto out;
3424 cmd = *pdata;
3425
3426 /* Only 1 retry for ELS Timeout only */
3427 if (ulp_status == IOSTAT_LOCAL_REJECT &&
3428 ((ulp_word4 & IOERR_PARAM_MASK) ==
3429 IOERR_SEQUENCE_TIMEOUT)) {
3430 cmdiocb->retry++;
3431 if (cmdiocb->retry <= 1) {
3432 switch (cmd) {
3433 case ELS_CMD_SCR:
3434 lpfc_issue_els_scr(vport, cmdiocb->retry);
3435 break;
3436 case ELS_CMD_EDC:
3437 lpfc_issue_els_edc(vport, cmdiocb->retry);
3438 break;
3439 case ELS_CMD_RDF:
3440 lpfc_issue_els_rdf(vport, cmdiocb->retry);
3441 break;
3442 }
3443 goto out;
3444 }
3445 phba->fc_stat.elsRetryExceeded++;
3446 }
3447 if (cmd == ELS_CMD_EDC) {
3448 /* must be called before checking ulp_status and returning */
3449 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
3450 return;
3451 }
3452 if (ulp_status) {
3453 /* ELS discovery cmd completes with error */
3454 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
3455 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
3456 ulp_status, ulp_word4);
3457 goto out;
3458 }
3459
3460 /* The RDF response doesn't have any impact on the running driver
3461 * but the notification descriptors are dumped here for support.
3462 */
3463 if (cmd == ELS_CMD_RDF) {
3464 int i;
3465
3466 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3467 if (!prsp)
3468 goto out;
3469
3470 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
3471 if (!prdf)
3472 goto out;
3473 if (!lpfc_is_els_acc_rsp(prsp))
3474 goto out;
3475
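		/* Dump each granted descriptor tag; the loop is bounded by the
		 * lesser of the driver's registered tag count and the count
		 * the fabric returned.
		 */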
3476 for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
3477 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
3478 lpfc_printf_vlog(vport, KERN_INFO,
3479 LOG_ELS | LOG_CGN_MGMT,
3480 "4677 Fabric RDF Notification Grant "
3481 "Data: 0x%08x Reg: %x %x\n",
3482 be32_to_cpu(
3483 prdf->reg_d1.desc_tags[i]),
3484 phba->cgn_reg_signal,
3485 phba->cgn_reg_fpin);
3486 }
3487
3488 out:
3489 /* Check to see if link went down during discovery */
3490 lpfc_els_chk_latt(vport);
3491 lpfc_els_free_iocb(phba, cmdiocb);
3492 lpfc_nlp_put(ndlp);
3493 return;
3494 }
3495
3496 /**
3497 * lpfc_issue_els_scr - Issue an SCR to a node on a vport
3498 * @vport: pointer to a host virtual N_Port data structure.
3499 * @retry: retry counter for the command IOCB.
3500 *
3501 * This routine issues a State Change Request (SCR) to a fabric node
3502 * on a @vport. The remote node is the Fabric Controller (0xfffffd). It
3503 * first searches the @vport node list to find the matching ndlp. If no such
3504 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3505 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3506 * routine is invoked to send the SCR IOCB.
3507 *
3508 * Note that the ndlp reference count will be incremented by 1 for holding the
3509 * ndlp and the reference to ndlp will be stored into the ndlp field of
3510 * the IOCB for the completion callback function to the SCR ELS command.
3511 *
3512 * Return code
3513 * 0 - Successfully issued scr command
3514 * 1 - Failed to issue scr command
3515 **/
3516 int
3517 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
3518 {
3519 int rc = 0;
3520 struct lpfc_hba *phba = vport->phba;
3521 struct lpfc_iocbq *elsiocb;
3522 uint8_t *pcmd;
3523 uint16_t cmdsize;
3524 struct lpfc_nodelist *ndlp;
3525
3526 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
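/* SCR payload: the 4-byte ELS command word followed by the SCR
 * parameter page; the function field is set below to SCR_FUNC_FULL
 * to request full state change registration.
 */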
3527
3528 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3529 if (!ndlp) {
3530 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3531 if (!ndlp)
3532 return 1;
3533 lpfc_enqueue_node(vport, ndlp);
3534 }
3535
3536 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3537 ndlp->nlp_DID, ELS_CMD_SCR);
3538 if (!elsiocb)
3539 return 1;
3540
3541 if (phba->sli_rev == LPFC_SLI_REV4) {
3542 rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
3543 if (rc) {
3544 lpfc_els_free_iocb(phba, elsiocb);
3545 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3546 "0937 %s: Failed to reg fc node, rc %d\n",
3547 __func__, rc);
3548 return 1;
3549 }
3550 }
3551 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3552
3553 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
3554 pcmd += sizeof(uint32_t);
3555
3556 /* For SCR, remainder of payload is SCR parameter page */
3557 memset(pcmd, 0, sizeof(SCR));
3558 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
3559
3560 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3561 "Issue SCR: did:x%x",
3562 ndlp->nlp_DID, 0, 0);
3563
3564 phba->fc_stat.elsXmitSCR++;
3565 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3566 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3567 if (!elsiocb->ndlp) {
3568 lpfc_els_free_iocb(phba, elsiocb);
3569 return 1;
3570 }
3571
3572 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3573 "Issue SCR: did:x%x refcnt %d",
3574 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3575
3576 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3577 if (rc == IOCB_ERROR) {
3578 lpfc_els_free_iocb(phba, elsiocb);
3579 lpfc_nlp_put(ndlp);
3580 return 1;
3581 }
3582
3583 return 0;
3584 }
3585
3586 /**
3587 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
3588 * or the other nport (pt2pt).
3589 * @vport: pointer to a host virtual N_Port data structure.
3590 * @retry: number of retries to the command IOCB.
3591 *
3592 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
3593 * when connected to a fabric, or to the remote port when connected
3594 * in point-to-point mode. When sent to the Fabric Controller, it will
3595 * replay the RSCN to registered recipients.
3596 *
3597 * Note that the ndlp reference count will be incremented by 1 for holding the
3598 * ndlp and the reference to ndlp will be stored into the ndlp field of
3599 * the IOCB for the completion callback function to the RSCN ELS command.
3600 *
3601 * Return code
3602 * 0 - Successfully issued RSCN command
3603 * 1 - Failed to issue RSCN command
3604 **/
3605 int
3606 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
3607 {
3608 int rc = 0;
3609 struct lpfc_hba *phba = vport->phba;
3610 struct lpfc_iocbq *elsiocb;
3611 struct lpfc_nodelist *ndlp;
3612 struct {
3613 struct fc_els_rscn rscn;
3614 struct fc_els_rscn_page portid;
3615 } *event;
3616 uint32_t nportid;
3617 uint16_t cmdsize = sizeof(*event);
3618
3619 /* Not supported for private loop */
3620 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
3621 !(vport->fc_flag & FC_PUBLIC_LOOP))
3622 return 1;
3623
3624 if (vport->fc_flag & FC_PT2PT) {
3625 /* find any mapped nport - that would be the other nport */
3626 ndlp = lpfc_findnode_mapped(vport);
3627 if (!ndlp)
3628 return 1;
3629 } else {
3630 nportid = FC_FID_FCTRL;
3631 /* find the fabric controller node */
3632 ndlp = lpfc_findnode_did(vport, nportid);
3633 if (!ndlp) {
3634 /* if one didn't exist, make one */
3635 ndlp = lpfc_nlp_init(vport, nportid);
3636 if (!ndlp)
3637 return 1;
3638 lpfc_enqueue_node(vport, ndlp);
3639 }
3640 }
3641
3642 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3643 ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
3644
3645 if (!elsiocb)
3646 return 1;
3647
3648 event = elsiocb->cmd_dmabuf->virt;
3649
3650 event->rscn.rscn_cmd = ELS_RSCN;
3651 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
3652 event->rscn.rscn_plen = cpu_to_be16(cmdsize);
3653
3654 nportid = vport->fc_myDID;
3655 /* appears that page flags must be 0 for fabric to broadcast RSCN */
3656 event->portid.rscn_page_flags = 0;
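/* The affected 24-bit N_Port ID is packed big-endian into the three
 * rscn_fid bytes: domain, area, then AL_PA.
 */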
3657 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
3658 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
3659 event->portid.rscn_fid[2] = nportid & 0x000000FF;
3660
3661 phba->fc_stat.elsXmitRSCN++;
3662 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3663 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3664 if (!elsiocb->ndlp) {
3665 lpfc_els_free_iocb(phba, elsiocb);
3666 return 1;
3667 }
3668
3669 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3670 "Issue RSCN: did:x%x",
3671 ndlp->nlp_DID, 0, 0);
3672
3673 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3674 if (rc == IOCB_ERROR) {
3675 lpfc_els_free_iocb(phba, elsiocb);
3676 lpfc_nlp_put(ndlp);
3677 return 1;
3678 }
3679
3680 return 0;
3681 }
3682
3683 /**
3684 * lpfc_issue_els_farpr - Issue a FARPR to a node on a vport
3685 * @vport: pointer to a host virtual N_Port data structure.
3686 * @nportid: N_Port identifier to the remote node.
3687 * @retry: number of retries to the command IOCB.
3688 *
3689 * This routine issues a Fibre Channel Address Resolution Response
3690 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3691 * is passed into the function. It first searches the @vport node list to find
3692 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3693 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3694 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3695 *
3696 * Note that the ndlp reference count will be incremented by 1 for holding the
3697 * ndlp and the reference to ndlp will be stored into the ndlp field of
3698 * the IOCB for the completion callback function to the FARPR ELS command.
3699 *
3700 * Return code
3701 * 0 - Successfully issued farpr command
3702 * 1 - Failed to issue farpr command
3703 **/
3704 static int
3705 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3706 {
3707 int rc = 0;
3708 struct lpfc_hba *phba = vport->phba;
3709 struct lpfc_iocbq *elsiocb;
3710 FARP *fp;
3711 uint8_t *pcmd;
3712 uint32_t *lp;
3713 uint16_t cmdsize;
3714 struct lpfc_nodelist *ondlp;
3715 struct lpfc_nodelist *ndlp;
3716
3717 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3718
3719 ndlp = lpfc_findnode_did(vport, nportid);
3720 if (!ndlp) {
3721 ndlp = lpfc_nlp_init(vport, nportid);
3722 if (!ndlp)
3723 return 1;
3724 lpfc_enqueue_node(vport, ndlp);
3725 }
3726
3727 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3728 ndlp->nlp_DID, ELS_CMD_FARPR);
3729 if (!elsiocb)
3730 return 1;
3731
3732 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3733
3734 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3735 pcmd += sizeof(uint32_t);
3736
3737 /* Fill in FARPR payload */
3738 fp = (FARP *) (pcmd);
3739 memset(fp, 0, sizeof(FARP));
3740 lp = (uint32_t *) pcmd;
3741 *lp++ = be32_to_cpu(nportid);
3742 *lp++ = be32_to_cpu(vport->fc_myDID);
3743 fp->Rflags = 0;
3744 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3745
3746 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3747 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3748 ondlp = lpfc_findnode_did(vport, nportid);
3749 if (ondlp) {
3750 memcpy(&fp->OportName, &ondlp->nlp_portname,
3751 sizeof(struct lpfc_name));
3752 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3753 sizeof(struct lpfc_name));
3754 }
3755
3756 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3757 "Issue FARPR: did:x%x",
3758 ndlp->nlp_DID, 0, 0);
3759
3760 phba->fc_stat.elsXmitFARPR++;
3761 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3762 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3763 if (!elsiocb->ndlp) {
3764 lpfc_els_free_iocb(phba, elsiocb);
3765 return 1;
3766 }
3767
3768 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3769 if (rc == IOCB_ERROR) {
3770 /* The additional lpfc_nlp_put will cause the following
3771 * lpfc_els_free_iocb routine to trigger the release of
3772 * the node.
3773 */
3774 lpfc_els_free_iocb(phba, elsiocb);
3775 lpfc_nlp_put(ndlp);
3776 return 1;
3777 }
3778 /* This will cause the callback-function lpfc_cmpl_els_cmd to
3779 * trigger the release of the node.
3780 */
3781 /* Don't release reference count as RDF is likely outstanding */
3782 return 0;
3783 }
3784
3785 /**
3786 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3787 * @vport: pointer to a host virtual N_Port data structure.
3788 * @retry: retry counter for the command IOCB.
3789 *
3790 * This routine issues an ELS RDF to the Fabric Controller to register
3791 * for diagnostic functions.
3792 *
3793 * Note that the ndlp reference count will be incremented by 1 for holding the
3794 * ndlp and the reference to ndlp will be stored into the ndlp field of
3795 * the IOCB for the completion callback function to the RDF ELS command.
3796 *
3797 * Return code
3798 * 0 - Successfully issued rdf command
3799 * 1 - Failed to issue rdf command
3800 **/
3801 int
3802 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
3803 {
3804 struct lpfc_hba *phba = vport->phba;
3805 struct lpfc_iocbq *elsiocb;
3806 struct lpfc_els_rdf_req *prdf;
3807 struct lpfc_nodelist *ndlp;
3808 uint16_t cmdsize;
3809 int rc;
3810
3811 cmdsize = sizeof(*prdf);
3812
3813 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3814 if (!ndlp) {
3815 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3816 if (!ndlp)
3817 return -ENODEV;
3818 lpfc_enqueue_node(vport, ndlp);
3819 }
3820
3821 /* RDF ELS is not required on an NPIV VN_Port. */
3822 if (vport->port_type == LPFC_NPIV_PORT)
3823 return -EACCES;
3824
3825 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3826 ndlp->nlp_DID, ELS_CMD_RDF);
3827 if (!elsiocb)
3828 return -ENOMEM;
3829
3830 /* Configure the payload for the supported FPIN events. */
3831 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
3832 memset(prdf, 0, cmdsize);
3833 prdf->rdf.fpin_cmd = ELS_RDF;
3834 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
3835 sizeof(struct fc_els_rdf));
3836 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
3837 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
3838 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
3839 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
3840 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
3841 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
3842 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
3843 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
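/* The request above registers for four FPIN descriptor types:
 * link integrity, delivery, peer congestion, and congestion.
 */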
3844
3845 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3846 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
3847 ndlp->nlp_DID, phba->cgn_reg_signal,
3848 phba->cgn_reg_fpin);
3849
3850 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
3851 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3852 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3853 if (!elsiocb->ndlp) {
3854 lpfc_els_free_iocb(phba, elsiocb);
3855 return -EIO;
3856 }
3857
3858 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3859 "Issue RDF: did:x%x refcnt %d",
3860 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3861
3862 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3863 if (rc == IOCB_ERROR) {
3864 lpfc_els_free_iocb(phba, elsiocb);
3865 lpfc_nlp_put(ndlp);
3866 return -EIO;
3867 }
3868 return 0;
3869 }
3870
3871 /**
3872 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
3873 * @vport: pointer to a host virtual N_Port data structure.
3874 * @cmdiocb: pointer to lpfc command iocb data structure.
3875 * @ndlp: pointer to a node-list data structure.
3876 *
3877 * A received RDF implies a possible change to fabric supported diagnostic
3878 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
3879 * RDF request to reregister for supported diagnostic functions.
3880 *
3881 * Return code
3882 * 0 - Success
3883 * -EIO - Failed to process received RDF
3884 **/
3885 static int
3886 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3887 struct lpfc_nodelist *ndlp)
3888 {
3889 /* Send LS_ACC */
3890 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3891 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3892 "1623 Failed to RDF_ACC from x%x for x%x\n",
3893 ndlp->nlp_DID, vport->fc_myDID);
3894 return -EIO;
3895 }
3896
3897 /* Issue new RDF for reregistering */
3898 if (lpfc_issue_els_rdf(vport, 0)) {
3899 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3900 "2623 Failed to re register RDF for x%x\n",
3901 vport->fc_myDID);
3902 return -EIO;
3903 }
3904
3905 return 0;
3906 }
3907
3908 /**
3909 * lpfc_least_capable_settings - helper function for EDC rsp processing
3910 * @phba: pointer to lpfc hba data structure.
3911 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3912 *
3913 * This helper routine determines the least capable setting for
3914 * congestion signals and signal frequency, including scale, from the
3915 * congestion detection descriptor in the EDC rsp. The routine
3916 * sets @phba values in preparation for a set_features mailbox.
3917 **/
3918 static void
3919 lpfc_least_capable_settings(struct lpfc_hba *phba,
3920 struct fc_diag_cg_sig_desc *pcgd)
3921 {
3922 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3923 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3924
3925 /* Get rsp signal and frequency capabilities. */
3926 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3927 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3928 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3929
3930 /* If the F_Port does not support signals, set FPIN only */
3931 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3932 goto out_no_support;
3933
3934 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3935 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3936 * to milliseconds.
3937 */
3938 switch (rsp_sig_freq_scale) {
3939 case EDC_CG_SIGFREQ_SEC:
3940 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3941 break;
3942 case EDC_CG_SIGFREQ_MSEC:
3943 rsp_sig_freq_cyc = 1;
3944 break;
3945 default:
3946 goto out_no_support;
3947 }
3948
3949 /* Convenient shorthand. */
3950 drv_sig_cap = phba->cgn_reg_signal;
3951
3952 /* Choose the least capable frequency. */
3953 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3954 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3955
3956 /* There should be some common signal support. Settle on the least capable
3957 * signal and adjust FPIN values. Initialize defaults to ease the
3958 * decision.
3959 */
3960 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
3961 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3962 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
3963 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
3964 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
3965 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3966 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3967 }
3968 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3969 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3970 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
3971 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
3972 }
3973 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
3974 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3975 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3976 }
3977 }
3978
3979 /* We are NOT recording signal frequency in congestion info buffer */
3980 return;
3981
3982 out_no_support:
3983 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3984 phba->cgn_sig_freq = 0;
3985 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
3986 }
3987
3988 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
3989 FC_LS_TLV_DTAG_INIT);
3990
3991 /**
3992 * lpfc_cmpl_els_edc - Completion callback function for EDC
3993 * @phba: pointer to lpfc hba data structure.
3994 * @cmdiocb: pointer to lpfc command iocb data structure.
3995 * @rspiocb: pointer to lpfc response iocb data structure.
3996 *
3997 * This routine is the completion callback function for issuing the Exchange
3998 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
3999 * notify the FPort of its Congestion and Link Fault capabilities. This
4000 * routine parses the FPort's response and decides on the least common
4001 * values applicable to both FPort and NPort for Warnings and Alarms that
4002 * are communicated via hardware signals.
4003 **/
4004 static void
4005 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4006 struct lpfc_iocbq *rspiocb)
4007 {
4008 IOCB_t *irsp_iocb;
4009 struct fc_els_edc_resp *edc_rsp;
4010 struct fc_tlv_desc *tlv;
4011 struct fc_diag_cg_sig_desc *pcgd;
4012 struct fc_diag_lnkflt_desc *plnkflt;
4013 struct lpfc_dmabuf *pcmd, *prsp;
4014 const char *dtag_nm;
4015 u32 *pdata, dtag;
4016 int desc_cnt = 0, bytes_remain;
4017 bool rcv_cap_desc = false;
4018 struct lpfc_nodelist *ndlp;
4019 u32 ulp_status, ulp_word4, tmo, did, iotag;
4020
4021 ndlp = cmdiocb->ndlp;
4022
4023 ulp_status = get_job_ulpstatus(phba, rspiocb);
4024 ulp_word4 = get_job_word4(phba, rspiocb);
4025 did = get_job_els_rsp64_did(phba, rspiocb);
4026
4027 if (phba->sli_rev == LPFC_SLI_REV4) {
4028 tmo = get_wqe_tmo(rspiocb);
4029 iotag = get_wqe_reqtag(rspiocb);
4030 } else {
4031 irsp_iocb = &rspiocb->iocb;
4032 tmo = irsp_iocb->ulpTimeout;
4033 iotag = irsp_iocb->ulpIoTag;
4034 }
4035
4036 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
4037 "EDC cmpl: status:x%x/x%x did:x%x",
4038 ulp_status, ulp_word4, did);
4039
4040 /* ELS cmd tag <ulpIoTag> completes */
4041 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4042 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
4043 iotag, ulp_status, ulp_word4, tmo);
4044
4045 pcmd = cmdiocb->cmd_dmabuf;
4046 if (!pcmd)
4047 goto out;
4048
4049 pdata = (u32 *)pcmd->virt;
4050 if (!pdata)
4051 goto out;
4052
4053 /* Need to clear signal values, send features MB and RDF with FPIN. */
4054 if (ulp_status)
4055 goto out;
4056
4057 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
4058 if (!prsp)
4059 goto out;
4060
4061 edc_rsp = prsp->virt;
4062 if (!edc_rsp)
4063 goto out;
4064
4065 /* ELS cmd tag <ulpIoTag> completes */
4066 lpfc_printf_log(phba, KERN_INFO,
4067 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
4068 "4676 Fabric EDC Rsp: "
4069 "0x%02x, 0x%08x\n",
4070 edc_rsp->acc_hdr.la_cmd,
4071 be32_to_cpu(edc_rsp->desc_list_len));
4072
4073 if (!lpfc_is_els_acc_rsp(prsp))
4074 goto out;
4075
4076 /*
4077 * Payload length in bytes is the response descriptor list
4078 * length minus the 12 bytes of Link Service Request
4079 * Information descriptor in the reply.
4080 */
4081 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
4082 sizeof(struct fc_els_lsri_desc);
4083 if (bytes_remain <= 0)
4084 goto out;
4085
4086 tlv = edc_rsp->desc;
4087
4088 /*
4089 * cycle through EDC diagnostic descriptors to find the
4090 * congestion signaling capability descriptor
4091 */
4092 while (bytes_remain) {
4093 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
4094 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4095 "6461 Truncated TLV hdr on "
4096 "Diagnostic descriptor[%d]\n",
4097 desc_cnt);
4098 goto out;
4099 }
4100
4101 dtag = be32_to_cpu(tlv->desc_tag);
4102 switch (dtag) {
4103 case ELS_DTAG_LNK_FAULT_CAP:
4104 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4105 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4106 sizeof(struct fc_diag_lnkflt_desc)) {
4107 lpfc_printf_log(phba, KERN_WARNING,
4108 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
4109 "6462 Truncated Link Fault Diagnostic "
4110 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4111 desc_cnt, bytes_remain,
4112 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4113 sizeof(struct fc_diag_lnkflt_desc));
4114 goto out;
4115 }
4116 plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
4117 lpfc_printf_log(phba, KERN_INFO,
4118 LOG_ELS | LOG_LDS_EVENT,
4119 "4617 Link Fault Desc Data: 0x%08x 0x%08x "
4120 "0x%08x 0x%08x 0x%08x\n",
4121 be32_to_cpu(plnkflt->desc_tag),
4122 be32_to_cpu(plnkflt->desc_len),
4123 be32_to_cpu(
4124 plnkflt->degrade_activate_threshold),
4125 be32_to_cpu(
4126 plnkflt->degrade_deactivate_threshold),
4127 be32_to_cpu(plnkflt->fec_degrade_interval));
4128 break;
4129 case ELS_DTAG_CG_SIGNAL_CAP:
4130 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4131 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4132 sizeof(struct fc_diag_cg_sig_desc)) {
4133 lpfc_printf_log(
4134 phba, KERN_WARNING, LOG_CGN_MGMT,
4135 "6463 Truncated Cgn Signal Diagnostic "
4136 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4137 desc_cnt, bytes_remain,
4138 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4139 sizeof(struct fc_diag_cg_sig_desc));
4140 goto out;
4141 }
4142
4143 pcgd = (struct fc_diag_cg_sig_desc *)tlv;
4144 lpfc_printf_log(
4145 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4146 "4616 CGN Desc Data: 0x%08x 0x%08x "
4147 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
4148 be32_to_cpu(pcgd->desc_tag),
4149 be32_to_cpu(pcgd->desc_len),
4150 be32_to_cpu(pcgd->xmt_signal_capability),
4151 be16_to_cpu(pcgd->xmt_signal_frequency.count),
4152 be16_to_cpu(pcgd->xmt_signal_frequency.units),
4153 be32_to_cpu(pcgd->rcv_signal_capability),
4154 be16_to_cpu(pcgd->rcv_signal_frequency.count),
4155 be16_to_cpu(pcgd->rcv_signal_frequency.units));
4156
4157 /* Compare driver and Fport capabilities and choose
4158 * least common.
4159 */
4160 lpfc_least_capable_settings(phba, pcgd);
4161 rcv_cap_desc = true;
4162 break;
4163 default:
4164 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
4165 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4166 "4919 unknown Diagnostic "
4167 "Descriptor[%d]: tag x%x (%s)\n",
4168 desc_cnt, dtag, dtag_nm);
4169 }
4170
4171 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
4172 tlv = fc_tlv_next_desc(tlv);
4173 desc_cnt++;
4174 }
4175
4176 out:
4177 if (!rcv_cap_desc) {
4178 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
4179 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4180 phba->cgn_sig_freq = 0;
4181 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
4182 "4202 EDC rsp error - sending RDF "
4183 "for FPIN only.\n");
4184 }
4185
4186 lpfc_config_cgn_signal(phba);
4187
4188 /* Check to see if link went down during discovery */
4189 lpfc_els_chk_latt(phba->pport);
4190 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
4191 "EDC Cmpl: did:x%x refcnt %d",
4192 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4193 lpfc_els_free_iocb(phba, cmdiocb);
4194 lpfc_nlp_put(ndlp);
4195 }
4196
4197 static void
4198 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
4199 {
4200 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
4201
4202 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
4203 lft->desc_len = cpu_to_be32(
4204 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
4205
4206 lft->degrade_activate_threshold =
4207 cpu_to_be32(phba->degrade_activate_threshold);
4208 lft->degrade_deactivate_threshold =
4209 cpu_to_be32(phba->degrade_deactivate_threshold);
4210 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
4211 }
4212
4213 static void
4214 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
4215 {
4216 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
4217
4218 /* We are assuming cgd was zero'ed before calling this routine */
4219
4220 /* Configure the congestion detection capability */
4221 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
4222
4223 /* Descriptor len doesn't include the tag or len fields. */
4224 cgd->desc_len = cpu_to_be32(
4225 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
4226
4227 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4228 * xmt_signal_frequency.count already set to 0.
4229 * xmt_signal_frequency.units already set to 0.
4230 */
4231
4232 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
4233 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4234 * rcv_signal_frequency.count already set to 0.
4235 * rcv_signal_frequency.units already set to 0.
4236 */
4237 phba->cgn_sig_freq = 0;
4238 return;
4239 }
4240 switch (phba->cgn_reg_signal) {
4241 case EDC_CG_SIG_WARN_ONLY:
4242 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
4243 break;
4244 case EDC_CG_SIG_WARN_ALARM:
4245 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
4246 break;
4247 default:
4248 /* rcv_signal_capability left 0 thus no support */
4249 break;
4250 }
4251
4252 /* We start negotiation with lpfc_fabric_cgn_frequency, after
4253 * the completion we settle on the higher frequency.
4254 */
4255 cgd->rcv_signal_frequency.count =
4256 cpu_to_be16(lpfc_fabric_cgn_frequency);
4257 cgd->rcv_signal_frequency.units =
4258 cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
4259 }
4260
4261 static bool
4262 lpfc_link_is_lds_capable(struct lpfc_hba *phba)
4263 {
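	/* Link Degrade Signaling support is only reported for SLI-4 ports
	 * with a 64G-capable link that is actually running at 64G, either
	 * as a single link or as a trunk.
	 */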
4264 if (!(phba->lmt & LMT_64Gb))
4265 return false;
4266 if (phba->sli_rev != LPFC_SLI_REV4)
4267 return false;
4268
4269 if (phba->sli4_hba.conf_trunk) {
4270 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
4271 return true;
4272 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
4273 return true;
4274 }
4275 return false;
4276 }
4277
4278 /**
4279 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
4280 * @vport: pointer to a host virtual N_Port data structure.
4281 * @retry: retry counter for the command iocb.
4282 *
4283 * This routine issues an ELS EDC to the F-Port Controller to communicate
4284 * this N_Port's support of hardware signals in its Congestion
4285 * Capabilities Descriptor.
4286 *
4287 * Note: This routine does not check if one or more signals are
4288 * set in the cgn_reg_signal parameter. The caller makes the
4289 * decision to enforce cgn_reg_signal as nonzero or zero depending
4290 * on the conditions. During Fabric requests, the driver
4291 * requires cgn_reg_signals to be nonzero. But a dynamic request
4292 * to set the congestion mode to OFF from Monitor or Manage
4293 * would correctly issue an EDC with no signals enabled to
4294 * turn off switch functionality and then update the FW.
4295 *
4296 * Return code
4297 * 0 - Successfully issued edc command
4298 * 1 - Failed to issue edc command
4299 **/
4300 int
4301 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
4302 {
4303 struct lpfc_hba *phba = vport->phba;
4304 struct lpfc_iocbq *elsiocb;
4305 struct fc_els_edc *edc_req;
4306 struct fc_tlv_desc *tlv;
4307 u16 cmdsize;
4308 struct lpfc_nodelist *ndlp;
4309 u8 *pcmd = NULL;
4310 u32 cgn_desc_size, lft_desc_size;
4311 int rc;
4312
4313 if (vport->port_type == LPFC_NPIV_PORT)
4314 return -EACCES;
4315
4316 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4317 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
4318 return -ENODEV;
4319
4320 cgn_desc_size = (phba->cgn_init_reg_signal) ?
4321 sizeof(struct fc_diag_cg_sig_desc) : 0;
4322 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
4323 sizeof(struct fc_diag_lnkflt_desc) : 0;
4324 cmdsize = cgn_desc_size + lft_desc_size;
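	/* cmdsize so far covers only the optional descriptors; the fixed
	 * fc_els_edc header is added below once we know at least one
	 * descriptor will actually be sent.
	 */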
4325
4326 /* Skip EDC if no applicable descriptors */
4327 if (!cmdsize)
4328 goto try_rdf;
4329
4330 cmdsize += sizeof(struct fc_els_edc);
4331 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
4332 ndlp->nlp_DID, ELS_CMD_EDC);
4333 if (!elsiocb)
4334 goto try_rdf;
4335
4336 /* Configure the payload for the supported Diagnostics capabilities. */
4337 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4338 memset(pcmd, 0, cmdsize);
4339 edc_req = (struct fc_els_edc *)pcmd;
4340 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
4341 edc_req->edc_cmd = ELS_EDC;
4342 tlv = edc_req->desc;
4343
4344 if (cgn_desc_size) {
4345 lpfc_format_edc_cgn_desc(phba, tlv);
4346 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4347 tlv = fc_tlv_next_desc(tlv);
4348 }
4349
4350 if (lft_desc_size)
4351 lpfc_format_edc_lft_desc(phba, tlv);
4352
4353 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4354 "4623 Xmit EDC to remote "
4355 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4356 ndlp->nlp_DID, phba->cgn_reg_signal,
4357 phba->cgn_reg_fpin);
4358
4359 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4360 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4361 if (!elsiocb->ndlp) {
4362 lpfc_els_free_iocb(phba, elsiocb);
4363 return -EIO;
4364 }
4365
4366 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4367 "Issue EDC: did:x%x refcnt %d",
4368 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4369 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4370 if (rc == IOCB_ERROR) {
4371 /* The additional lpfc_nlp_put will cause the following
4372 * lpfc_els_free_iocb routine to trigger the release of
4373 * the node.
4374 */
4375 lpfc_els_free_iocb(phba, elsiocb);
4376 lpfc_nlp_put(ndlp);
4377 goto try_rdf;
4378 }
4379 return 0;
4380 try_rdf:
4381 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4382 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4383 rc = lpfc_issue_els_rdf(vport, 0);
4384 return rc;
4385 }
4386
4387 /**
4388 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4389 * @vport: pointer to a host virtual N_Port data structure.
4390 * @nlp: pointer to a node-list data structure.
4391 *
4392 * This routine cancels the timer with a delayed IOCB-command retry for
4393 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
4394 * removes the ELS retry event if one is present. In addition, if the
4395 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4396 * commands are sent for the @vport's nodes that require issuing discovery
4397 * ADISC.
4398 **/
4399 void
4400 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
4401 {
4402 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4403 struct lpfc_work_evt *evtp;
4404
4405 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
4406 return;
4407 spin_lock_irq(&nlp->lock);
4408 nlp->nlp_flag &= ~NLP_DELAY_TMO;
4409 spin_unlock_irq(&nlp->lock);
4410 del_timer_sync(&nlp->nlp_delayfunc);
4411 nlp->nlp_last_elscmd = 0;
4412 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
4413 list_del_init(&nlp->els_retry_evt.evt_listp);
4414 /* Decrement nlp reference count held for the delayed retry */
4415 evtp = &nlp->els_retry_evt;
4416 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
4417 }
4418 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
4419 spin_lock_irq(&nlp->lock);
4420 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4421 spin_unlock_irq(&nlp->lock);
4422 if (vport->num_disc_nodes) {
4423 if (vport->port_state < LPFC_VPORT_READY) {
4424 /* Check if there are more ADISCs to be sent */
4425 lpfc_more_adisc(vport);
4426 } else {
4427 /* Check if there are more PLOGIs to be sent */
4428 lpfc_more_plogi(vport);
4429 if (vport->num_disc_nodes == 0) {
4430 spin_lock_irq(shost->host_lock);
4431 vport->fc_flag &= ~FC_NDISC_ACTIVE;
4432 spin_unlock_irq(shost->host_lock);
4433 lpfc_can_disctmo(vport);
4434 lpfc_end_rscn(vport);
4435 }
4436 }
4437 }
4438 }
4439 return;
4440 }
4441
4442 /**
4443 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
4444 * @t: pointer to the timer function associated data (ndlp).
4445 *
4446 * This routine is invoked by the ndlp delayed-function timer to check
4447 * whether there is any pending ELS retry event(s) with the node. If not, it
4448 * simply returns. Otherwise, if there is at least one ELS delayed event, it
4449 * adds the delayed events to the HBA work list and invokes the
4450 * lpfc_worker_wake_up() routine to wake up worker thread to process the
4451 * event. Note that lpfc_nlp_get() is called before posting the event to
4452 * the work list to hold reference count of ndlp so that it guarantees the
4453 * reference to ndlp will still be available when the worker thread gets
4454 * to the event associated with the ndlp.
4455 **/
4456 void
4457 lpfc_els_retry_delay(struct timer_list *t)
4458 {
4459 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
4460 struct lpfc_vport *vport = ndlp->vport;
4461 struct lpfc_hba *phba = vport->phba;
4462 unsigned long flags;
4463 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
4464
4465 spin_lock_irqsave(&phba->hbalock, flags);
4466 if (!list_empty(&evtp->evt_listp)) {
4467 spin_unlock_irqrestore(&phba->hbalock, flags);
4468 return;
4469 }
4470
4471 /* We need to hold the node by incrementing the reference
4472 * count until the queued work is done
4473 */
4474 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
4475 if (evtp->evt_arg1) {
4476 evtp->evt = LPFC_EVT_ELS_RETRY;
4477 list_add_tail(&evtp->evt_listp, &phba->work_list);
4478 lpfc_worker_wake_up(phba);
4479 }
4480 spin_unlock_irqrestore(&phba->hbalock, flags);
4481 return;
4482 }
4483
4484 /**
4485 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
4486 * @ndlp: pointer to a node-list data structure.
4487 *
4488 * This routine is the worker-thread handler for processing the @ndlp delayed
4489 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
4490 * the last ELS command from the associated ndlp and invokes the proper ELS
4491 * function according to the delayed ELS command to retry the command.
4492 **/
4493 void
4494 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
4495 {
4496 struct lpfc_vport *vport = ndlp->vport;
4497 uint32_t cmd, retry;
4498
4499 spin_lock_irq(&ndlp->lock);
4500 cmd = ndlp->nlp_last_elscmd;
4501 ndlp->nlp_last_elscmd = 0;
4502
4503 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
4504 spin_unlock_irq(&ndlp->lock);
4505 return;
4506 }
4507
4508 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4509 spin_unlock_irq(&ndlp->lock);
4510 /*
4511 * If a discovery event re-added nlp_delayfunc after the timer
4512 * firing and before processing the timer, cancel the
4513 * nlp_delayfunc.
4514 */
4515 del_timer_sync(&ndlp->nlp_delayfunc);
4516 retry = ndlp->nlp_retry;
4517 ndlp->nlp_retry = 0;
4518
4519 switch (cmd) {
4520 case ELS_CMD_FLOGI:
4521 lpfc_issue_els_flogi(vport, ndlp, retry);
4522 break;
4523 case ELS_CMD_PLOGI:
4524 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
4525 ndlp->nlp_prev_state = ndlp->nlp_state;
4526 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4527 }
4528 break;
4529 case ELS_CMD_ADISC:
4530 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
4531 ndlp->nlp_prev_state = ndlp->nlp_state;
4532 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4533 }
4534 break;
4535 case ELS_CMD_PRLI:
4536 case ELS_CMD_NVMEPRLI:
4537 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
4538 ndlp->nlp_prev_state = ndlp->nlp_state;
4539 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
4540 }
4541 break;
4542 case ELS_CMD_LOGO:
4543 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
4544 ndlp->nlp_prev_state = ndlp->nlp_state;
4545 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
4546 }
4547 break;
4548 case ELS_CMD_FDISC:
4549 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
4550 lpfc_issue_els_fdisc(vport, ndlp, retry);
4551 break;
4552 }
4553 return;
4554 }
4555
4556 /**
4557 * lpfc_link_reset - Issue link reset
4558 * @vport: pointer to a virtual N_Port data structure.
4559 *
4560 * This routine performs link reset by sending INIT_LINK mailbox command.
4561 * For SLI-3 adapter, link attention interrupt is enabled before issuing
4562 * INIT_LINK mailbox command.
4563 *
4564 * Return code
4565 * 0 - Link reset initiated successfully
4566 * 1 - Failed to initiate link reset
4567 **/
4568 int
4569 lpfc_link_reset(struct lpfc_vport *vport)
4570 {
4571 struct lpfc_hba *phba = vport->phba;
4572 LPFC_MBOXQ_t *mbox;
4573 uint32_t control;
4574 int rc;
4575
4576 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4577 "2851 Attempt link reset\n");
4578 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4579 if (!mbox) {
4580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4581 "2852 Failed to allocate mbox memory");
4582 return 1;
4583 }
4584
4585 /* Enable Link attention interrupts */
4586 if (phba->sli_rev <= LPFC_SLI_REV3) {
4587 spin_lock_irq(&phba->hbalock);
4588 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4589 control = readl(phba->HCregaddr);
4590 control |= HC_LAINT_ENA;
4591 writel(control, phba->HCregaddr);
4592 readl(phba->HCregaddr); /* flush */
4593 spin_unlock_irq(&phba->hbalock);
4594 }
4595
4596 lpfc_init_link(phba, mbox, phba->cfg_topology,
4597 phba->cfg_link_speed);
4598 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4599 mbox->vport = vport;
4600 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4601 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4603 "2853 Failed to issue INIT_LINK "
4604 "mbox command, rc:x%x\n", rc);
4605 mempool_free(mbox, phba->mbox_mem_pool);
4606 return 1;
4607 }
4608
4609 return 0;
4610 }
4611
4612 /**
4613 * lpfc_els_retry - Make retry decision on an els command iocb
4614 * @phba: pointer to lpfc hba data structure.
4615 * @cmdiocb: pointer to lpfc command iocb data structure.
4616 * @rspiocb: pointer to lpfc response iocb data structure.
4617 *
4618 * This routine makes a retry decision on an ELS command IOCB, which has
4619 * failed. The following ELS IOCBs use this function for retrying the command
4620 * when previously issued command responsed with error status: FLOGI, PLOGI,
4621 * PRLI, ADISC and FDISC. Based on the ELS command type and the
4622 * returned error status, it makes the decision whether a retry shall be
4623 * issued for the command, and whether a retry shall be made immediately or
4624 * delayed. In the former case, the corresponding ELS command issuing-function
4625 * is called to retry the command. In the later case, the ELS command shall
4626 * be posted to the ndlp delayed event and delayed function timer set to the
4627 * ndlp for the delayed command issusing.
4628 *
4629 * Return code
4630 * 0 - No retry of els command is made
4631 * 1 - Immediate or delayed retry of els command is made
4632 **/
4633 static int
4634 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4635 struct lpfc_iocbq *rspiocb)
4636 {
4637 struct lpfc_vport *vport = cmdiocb->vport;
4638 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4639 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4640 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4641 uint32_t *elscmd;
4642 struct ls_rjt stat;
4643 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4644 int logerr = 0;
4645 uint32_t cmd = 0;
4646 uint32_t did;
4647 int link_reset = 0, rc;
4648 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4649 u32 ulp_word4 = get_job_word4(phba, rspiocb);
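	/* Conventions used by the retry policy below: maxretry == 0 means no
	 * upper bound on retries, and delay is in milliseconds (converted via
	 * msecs_to_jiffies() when the delayed retry timer is armed).
	 */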
4650
4651
4652 /* Note: cmd_dmabuf may be 0 for internal driver abort
4653 * of a delayed ELS command.
4654 */
4655
4656 if (pcmd && pcmd->virt) {
4657 elscmd = (uint32_t *) (pcmd->virt);
4658 cmd = *elscmd++;
4659 }
4660
4661 if (ndlp)
4662 did = ndlp->nlp_DID;
4663 else {
4664 /* We should only hit this case for retrying PLOGI */
4665 did = get_job_els_rsp64_did(phba, rspiocb);
4666 ndlp = lpfc_findnode_did(vport, did);
4667 if (!ndlp && (cmd != ELS_CMD_PLOGI))
4668 return 0;
4669 }
4670
4671 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4672 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
4673 *(((uint32_t *)irsp) + 7), ulp_word4, did);
4674
4675 switch (ulp_status) {
4676 case IOSTAT_FCP_RSP_ERROR:
4677 break;
4678 case IOSTAT_REMOTE_STOP:
4679 if (phba->sli_rev == LPFC_SLI_REV4) {
4680 /* This IO was aborted by the target, we don't
4681 * know the rxid and because we did not send the
4682 * ABTS we cannot generate an RRQ.
4683 */
4684 lpfc_set_rrq_active(phba, ndlp,
4685 cmdiocb->sli4_lxritag, 0, 0);
4686 }
4687 break;
4688 case IOSTAT_LOCAL_REJECT:
4689 switch ((ulp_word4 & IOERR_PARAM_MASK)) {
4690 case IOERR_LOOP_OPEN_FAILURE:
4691 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
4692 delay = 1000;
4693 retry = 1;
4694 break;
4695
4696 case IOERR_ILLEGAL_COMMAND:
4697 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4698 "0124 Retry illegal cmd x%x "
4699 "retry:x%x delay:x%x\n",
4700 cmd, cmdiocb->retry, delay);
4701 retry = 1;
4702 /* All command's retry policy */
4703 maxretry = 8;
4704 if (cmdiocb->retry > 2)
4705 delay = 1000;
4706 break;
4707
4708 case IOERR_NO_RESOURCES:
4709 logerr = 1; /* HBA out of resources */
4710 retry = 1;
4711 if (cmdiocb->retry > 100)
4712 delay = 100;
4713 maxretry = 250;
4714 break;
4715
4716 case IOERR_ILLEGAL_FRAME:
4717 delay = 100;
4718 retry = 1;
4719 break;
4720
4721 case IOERR_INVALID_RPI:
4722 if (cmd == ELS_CMD_PLOGI &&
4723 did == NameServer_DID) {
4724 /* Continue forever if plogi to */
4725 /* the nameserver fails */
4726 maxretry = 0;
4727 delay = 100;
4728 } else if (cmd == ELS_CMD_PRLI &&
4729 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
4730 /* State-command disagreement. The PRLI was
4731 * failed with an invalid rpi, meaning there was
4732 * some unexpected state change. Don't retry.
4733 */
4734 maxretry = 0;
4735 retry = 0;
4736 break;
4737 }
4738 retry = 1;
4739 break;
4740
4741 case IOERR_SEQUENCE_TIMEOUT:
4742 if (cmd == ELS_CMD_PLOGI &&
4743 did == NameServer_DID &&
4744 (cmdiocb->retry + 1) == maxretry) {
4745 /* Reset the Link */
4746 link_reset = 1;
4747 break;
4748 }
4749 retry = 1;
4750 delay = 100;
4751 break;
4752 case IOERR_SLI_ABORTED:
4753 /* Retry ELS PLOGI command?
4754 * Possibly the rport just wasn't ready.
4755 */
4756 if (cmd == ELS_CMD_PLOGI) {
4757 /* No retry if state change */
4758 if (ndlp &&
4759 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
4760 goto out_retry;
4761 retry = 1;
4762 maxretry = 2;
4763 }
4764 break;
4765 }
4766 break;
4767
4768 case IOSTAT_NPORT_RJT:
4769 case IOSTAT_FABRIC_RJT:
4770 if (ulp_word4 & RJT_UNAVAIL_TEMP) {
4771 retry = 1;
4772 break;
4773 }
4774 break;
4775
4776 case IOSTAT_NPORT_BSY:
4777 case IOSTAT_FABRIC_BSY:
4778 logerr = 1; /* Fabric / Remote NPort out of resources */
4779 retry = 1;
4780 break;
4781
4782 case IOSTAT_LS_RJT:
4783 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
4784 /* Added for Vendor specific support
4785 * Just keep retrying for these Rsn / Exp codes
4786 */
4787 if ((vport->fc_flag & FC_PT2PT) &&
4788 cmd == ELS_CMD_NVMEPRLI) {
4789 switch (stat.un.b.lsRjtRsnCode) {
4790 case LSRJT_UNABLE_TPC:
4791 case LSRJT_INVALID_CMD:
4792 case LSRJT_LOGICAL_ERR:
4793 case LSRJT_CMD_UNSUPPORTED:
4794 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
4795 "0168 NVME PRLI LS_RJT "
4796 "reason %x port doesn't "
4797 "support NVME, disabling NVME\n",
4798 stat.un.b.lsRjtRsnCode);
4799 retry = 0;
4800 vport->fc_flag |= FC_PT2PT_NO_NVME;
4801 goto out_retry;
4802 }
4803 }
4804 switch (stat.un.b.lsRjtRsnCode) {
4805 case LSRJT_UNABLE_TPC:
4806 /* Special case for PRLI LS_RJTs. Recall that lpfc
4807 * uses a single routine to issue both PRLI FC4 types.
4808 * If the PRLI is rejected because that FC4 type
4809 * isn't really supported, don't retry and cause
4810 * multiple transport registrations. Otherwise, parse
4811 * the reason code/reason code explanation and take the
4812 * appropriate action.
4813 */
4814 lpfc_printf_vlog(vport, KERN_INFO,
4815 LOG_DISCOVERY | LOG_ELS | LOG_NODE,
4816 "0153 ELS cmd x%x LS_RJT by x%x. "
4817 "RsnCode x%x RsnCodeExp x%x\n",
4818 cmd, did, stat.un.b.lsRjtRsnCode,
4819 stat.un.b.lsRjtRsnCodeExp);
4820
4821 switch (stat.un.b.lsRjtRsnCodeExp) {
4822 case LSEXP_CANT_GIVE_DATA:
4823 case LSEXP_CMD_IN_PROGRESS:
4824 if (cmd == ELS_CMD_PLOGI) {
4825 delay = 1000;
4826 maxretry = 48;
4827 }
4828 retry = 1;
4829 break;
4830 case LSEXP_REQ_UNSUPPORTED:
4831 case LSEXP_NO_RSRC_ASSIGN:
4832 /* These explanation codes get no retry. */
4833 if (cmd == ELS_CMD_PRLI ||
4834 cmd == ELS_CMD_NVMEPRLI)
4835 break;
4836 fallthrough;
4837 default:
4838 /* Limit the delay and retry action to a limited
4839 * cmd set. There are other ELS commands where
4840 * a retry is not expected.
4841 */
4842 if (cmd == ELS_CMD_PLOGI ||
4843 cmd == ELS_CMD_PRLI ||
4844 cmd == ELS_CMD_NVMEPRLI) {
4845 delay = 1000;
4846 maxretry = lpfc_max_els_tries + 1;
4847 retry = 1;
4848 }
4849 break;
4850 }
4851
4852 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4853 (cmd == ELS_CMD_FDISC) &&
4854 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
4855 lpfc_printf_vlog(vport, KERN_ERR,
4856 LOG_TRACE_EVENT,
4857 "0125 FDISC Failed (x%x). "
4858 "Fabric out of resources\n",
4859 stat.un.lsRjtError);
4860 lpfc_vport_set_state(vport,
4861 FC_VPORT_NO_FABRIC_RSCS);
4862 }
4863 break;
4864
4865 case LSRJT_LOGICAL_BSY:
4866 if ((cmd == ELS_CMD_PLOGI) ||
4867 (cmd == ELS_CMD_PRLI) ||
4868 (cmd == ELS_CMD_NVMEPRLI)) {
4869 delay = 1000;
4870 maxretry = 48;
4871 } else if (cmd == ELS_CMD_FDISC) {
4872 /* FDISC retry policy */
4873 maxretry = 48;
4874 if (cmdiocb->retry >= 32)
4875 delay = 1000;
4876 }
4877 retry = 1;
4878 break;
4879
4880 case LSRJT_LOGICAL_ERR:
4881 /* There are some cases where switches return this
4882 * error when they are not ready and should be returning
4883 * Logical Busy. We should delay every time.
4884 */
4885 if (cmd == ELS_CMD_FDISC &&
4886 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
4887 maxretry = 3;
4888 delay = 1000;
4889 retry = 1;
4890 } else if (cmd == ELS_CMD_FLOGI &&
4891 stat.un.b.lsRjtRsnCodeExp ==
4892 LSEXP_NOTHING_MORE) {
4893 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
4894 retry = 1;
4895 lpfc_printf_vlog(vport, KERN_ERR,
4896 LOG_TRACE_EVENT,
4897 "0820 FLOGI Failed (x%x). "
4898 "BBCredit Not Supported\n",
4899 stat.un.lsRjtError);
4900 }
4901 break;
4902
4903 case LSRJT_PROTOCOL_ERR:
4904 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4905 (cmd == ELS_CMD_FDISC) &&
4906 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
4907 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
4908 ) {
4909 lpfc_printf_vlog(vport, KERN_ERR,
4910 LOG_TRACE_EVENT,
4911 "0122 FDISC Failed (x%x). "
4912 "Fabric Detected Bad WWN\n",
4913 stat.un.lsRjtError);
4914 lpfc_vport_set_state(vport,
4915 FC_VPORT_FABRIC_REJ_WWN);
4916 }
4917 break;
4918 case LSRJT_VENDOR_UNIQUE:
4919 if ((stat.un.b.vendorUnique == 0x45) &&
4920 (cmd == ELS_CMD_FLOGI)) {
4921 goto out_retry;
4922 }
4923 break;
4924 case LSRJT_CMD_UNSUPPORTED:
4925 /* lpfc nvmet returns this type of LS_RJT when it
4926 			 * receives an FCP PRLI because lpfc nvmet only
4927 			 * supports NVME. The ELS request is terminated
4928 			 * for FCP4 on this rport.
4929 */
4930 if (stat.un.b.lsRjtRsnCodeExp ==
4931 LSEXP_REQ_UNSUPPORTED) {
4932 if (cmd == ELS_CMD_PRLI)
4933 goto out_retry;
4934 }
4935 break;
4936 }
4937 break;
4938
4939 case IOSTAT_INTERMED_RSP:
4940 case IOSTAT_BA_RJT:
4941 break;
4942
4943 default:
4944 break;
4945 }
4946
4947 if (link_reset) {
4948 rc = lpfc_link_reset(vport);
4949 if (rc) {
4950 /* Do not give up. Retry PLOGI one more time and attempt
4951 * link reset if PLOGI fails again.
4952 */
4953 retry = 1;
4954 delay = 100;
4955 goto out_retry;
4956 }
4957 return 1;
4958 }
4959
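	/* ELS commands to the management server (FDMI_DID) are always
	 * candidates for another retry.
	 */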
4960 if (did == FDMI_DID)
4961 retry = 1;
4962
4963 if ((cmd == ELS_CMD_FLOGI) &&
4964 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
4965 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
4966 /* FLOGI retry policy */
4967 retry = 1;
4968 /* retry FLOGI forever */
4969 if (phba->link_flag != LS_LOOPBACK_MODE)
4970 maxretry = 0;
4971 else
4972 maxretry = 2;
4973
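		/* Pace the FLOGI retries: after 32 attempts wait 1 second
		 * between tries, and after 100 attempts wait 5 seconds.
		 */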
4974 if (cmdiocb->retry >= 100)
4975 delay = 5000;
4976 else if (cmdiocb->retry >= 32)
4977 delay = 1000;
4978 } else if ((cmd == ELS_CMD_FDISC) &&
4979 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
4980 /* retry FDISCs every second up to devloss */
4981 retry = 1;
4982 maxretry = vport->cfg_devloss_tmo;
4983 delay = 1000;
4984 }
4985
4986 cmdiocb->retry++;
4987 if (maxretry && (cmdiocb->retry >= maxretry)) {
4988 phba->fc_stat.elsRetryExceeded++;
4989 retry = 0;
4990 }
4991
4992 if ((vport->load_flag & FC_UNLOADING) != 0)
4993 retry = 0;
4994
4995 out_retry:
4996 if (retry) {
4997 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
4998 /* Stop retrying PLOGI and FDISC if in FCF discovery */
4999 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5000 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5001 "2849 Stop retry ELS command "
5002 "x%x to remote NPORT x%x, "
5003 "Data: x%x x%x\n", cmd, did,
5004 cmdiocb->retry, delay);
5005 return 0;
5006 }
5007 }
5008
5009 /* Retry ELS command <elsCmd> to remote NPORT <did> */
5010 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5011 "0107 Retry ELS command x%x to remote "
5012 "NPORT x%x Data: x%x x%x\n",
5013 cmd, did, cmdiocb->retry, delay);
5014
5015 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
5016 ((ulp_status != IOSTAT_LOCAL_REJECT) ||
5017 ((ulp_word4 & IOERR_PARAM_MASK) !=
5018 IOERR_NO_RESOURCES))) {
5019 /* Don't reset timer for no resources */
5020
5021 /* If discovery / RSCN timer is running, reset it */
5022 if (timer_pending(&vport->fc_disctmo) ||
5023 (vport->fc_flag & FC_RSCN_MODE))
5024 lpfc_set_disctmo(vport);
5025 }
5026
5027 phba->fc_stat.elsXmitRetry++;
5028 if (ndlp && delay) {
5029 phba->fc_stat.elsDelayRetry++;
5030 ndlp->nlp_retry = cmdiocb->retry;
5031
5032 /* delay is specified in milliseconds */
5033 mod_timer(&ndlp->nlp_delayfunc,
5034 jiffies + msecs_to_jiffies(delay));
5035 spin_lock_irq(&ndlp->lock);
5036 ndlp->nlp_flag |= NLP_DELAY_TMO;
5037 spin_unlock_irq(&ndlp->lock);
5038
5039 ndlp->nlp_prev_state = ndlp->nlp_state;
5040 if ((cmd == ELS_CMD_PRLI) ||
5041 (cmd == ELS_CMD_NVMEPRLI))
5042 lpfc_nlp_set_state(vport, ndlp,
5043 NLP_STE_PRLI_ISSUE);
5044 else if (cmd != ELS_CMD_ADISC)
5045 lpfc_nlp_set_state(vport, ndlp,
5046 NLP_STE_NPR_NODE);
5047 ndlp->nlp_last_elscmd = cmd;
5048
5049 return 1;
5050 }
5051 switch (cmd) {
5052 case ELS_CMD_FLOGI:
5053 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
5054 return 1;
5055 case ELS_CMD_FDISC:
5056 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
5057 return 1;
5058 case ELS_CMD_PLOGI:
5059 if (ndlp) {
5060 ndlp->nlp_prev_state = ndlp->nlp_state;
5061 lpfc_nlp_set_state(vport, ndlp,
5062 NLP_STE_PLOGI_ISSUE);
5063 }
5064 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
5065 return 1;
5066 case ELS_CMD_ADISC:
5067 ndlp->nlp_prev_state = ndlp->nlp_state;
5068 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5069 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
5070 return 1;
5071 case ELS_CMD_PRLI:
5072 case ELS_CMD_NVMEPRLI:
5073 ndlp->nlp_prev_state = ndlp->nlp_state;
5074 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
5075 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
5076 return 1;
5077 case ELS_CMD_LOGO:
5078 ndlp->nlp_prev_state = ndlp->nlp_state;
5079 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
5080 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
5081 return 1;
5082 }
5083 }
5084 /* No retry ELS command <elsCmd> to remote NPORT <did> */
5085 if (logerr) {
5086 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5087 "0137 No retry ELS command x%x to remote "
5088 "NPORT x%x: Out of Resources: Error:x%x/%x "
5089 "IoTag x%x\n",
5090 cmd, did, ulp_status, ulp_word4,
5091 cmdiocb->iotag);
5092 }
5093 else {
5094 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5095 "0108 No retry ELS command x%x to remote "
5096 "NPORT x%x Retried:%d Error:x%x/%x "
5097 "IoTag x%x nflags x%x\n",
5098 cmd, did, cmdiocb->retry, ulp_status,
5099 ulp_word4, cmdiocb->iotag,
5100 (ndlp ? ndlp->nlp_flag : 0));
5101 }
5102 return 0;
5103 }
5104
5105 /**
5106 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
5107 * @phba: pointer to lpfc hba data structure.
5108 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
5109 *
5110 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
5111 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
5112  * checks to see whether there is an lpfc DMA buffer associated with the
5113 * response of the command IOCB. If so, it will be released before releasing
5114 * the lpfc DMA buffer associated with the IOCB itself.
5115 *
5116 * Return code
5117 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
5118 **/
5119 static int
5120 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
5121 {
5122 struct lpfc_dmabuf *buf_ptr;
5123
5124 /* Free the response before processing the command. */
5125 if (!list_empty(&buf_ptr1->list)) {
5126 list_remove_head(&buf_ptr1->list, buf_ptr,
5127 struct lpfc_dmabuf,
5128 list);
5129 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
5130 kfree(buf_ptr);
5131 }
5132 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
5133 kfree(buf_ptr1);
5134 return 0;
5135 }
5136
5137 /**
5138 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
5139 * @phba: pointer to lpfc hba data structure.
5140 * @buf_ptr: pointer to the lpfc dma buffer data structure.
5141 *
5142 * This routine releases the lpfc Direct Memory Access (DMA) buffer
5143 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
5144 * pool.
5145 *
5146 * Return code
5147 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
5148 **/
5149 static int
5150 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
5151 {
5152 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
5153 kfree(buf_ptr);
5154 return 0;
5155 }
5156
5157 /**
5158 * lpfc_els_free_iocb - Free a command iocb and its associated resources
5159 * @phba: pointer to lpfc hba data structure.
5160 * @elsiocb: pointer to lpfc els command iocb data structure.
5161 *
5162 * This routine frees a command IOCB and its associated resources. The
5163  * command IOCB data structure contains references to various associated
5164  * resources; these fields must be set to NULL if the associated reference
5165  * is not present:
5166 * cmd_dmabuf - reference to cmd.
5167 * cmd_dmabuf->next - reference to rsp
5168 * rsp_dmabuf - unused
5169 * bpl_dmabuf - reference to bpl
5170 *
5171 * It first properly decrements the reference count held on ndlp for the
5172 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
5173 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5174 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
5175  * adds the DMA buffer to the @phba data structure for delayed release.
5176 * If reference to the Buffer Pointer List (BPL) is present, the
5177 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5178 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
5179 * invoked to release the IOCB data structure back to @phba IOCBQ list.
5180 *
5181 * Return code
5182 * 0 - Success (currently, always return 0)
5183 **/
5184 int
5185 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5186 {
5187 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5188
5189 /* The I/O iocb is complete. Clear the node and first dmbuf */
5190 elsiocb->ndlp = NULL;
5191
5192 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5193 if (elsiocb->cmd_dmabuf) {
5194 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5195 /* Firmware could still be in progress of DMAing
5196 * payload, so don't free data buffer till after
5197 * a hbeat.
5198 */
5199 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5200 buf_ptr = elsiocb->cmd_dmabuf;
5201 elsiocb->cmd_dmabuf = NULL;
5202 if (buf_ptr) {
5203 buf_ptr1 = NULL;
5204 spin_lock_irq(&phba->hbalock);
5205 if (!list_empty(&buf_ptr->list)) {
5206 list_remove_head(&buf_ptr->list,
5207 buf_ptr1, struct lpfc_dmabuf,
5208 list);
5209 INIT_LIST_HEAD(&buf_ptr1->list);
5210 list_add_tail(&buf_ptr1->list,
5211 &phba->elsbuf);
5212 phba->elsbuf_cnt++;
5213 }
5214 INIT_LIST_HEAD(&buf_ptr->list);
5215 list_add_tail(&buf_ptr->list, &phba->elsbuf);
5216 phba->elsbuf_cnt++;
5217 spin_unlock_irq(&phba->hbalock);
5218 }
5219 } else {
5220 buf_ptr1 = elsiocb->cmd_dmabuf;
5221 lpfc_els_free_data(phba, buf_ptr1);
5222 elsiocb->cmd_dmabuf = NULL;
5223 }
5224 }
5225
5226 if (elsiocb->bpl_dmabuf) {
5227 buf_ptr = elsiocb->bpl_dmabuf;
5228 lpfc_els_free_bpl(phba, buf_ptr);
5229 elsiocb->bpl_dmabuf = NULL;
5230 }
5231 lpfc_sli_release_iocbq(phba, elsiocb);
5232 return 0;
5233 }
5234
5235 /**
5236 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5237 * @phba: pointer to lpfc hba data structure.
5238 * @cmdiocb: pointer to lpfc command iocb data structure.
5239 * @rspiocb: pointer to lpfc response iocb data structure.
5240 *
5241 * This routine is the completion callback function to the Logout (LOGO)
5242 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5243 * the completion of the LOGO process. If the node has transitioned to NPR,
5244 * this routine unregisters the RPI if it is still registered. The
5245 * lpfc_els_free_iocb() is invoked to release the IOCB data structure.
5246 **/
5247 static void
5248 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5249 struct lpfc_iocbq *rspiocb)
5250 {
5251 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5252 struct lpfc_vport *vport = cmdiocb->vport;
5253 u32 ulp_status, ulp_word4;
5254
5255 ulp_status = get_job_ulpstatus(phba, rspiocb);
5256 ulp_word4 = get_job_word4(phba, rspiocb);
5257
5258 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5259 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
5260 ulp_status, ulp_word4, ndlp->nlp_DID);
5261 /* ACC to LOGO completes to NPort <nlp_DID> */
5262 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5263 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
5264 "Data: x%x x%x x%x\n",
5265 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
5266 ndlp->nlp_state, ndlp->nlp_rpi);
5267
5268 /* This clause allows the LOGO ACC to complete and free resources
5269 	 * for the Fabric Domain Controller. It deliberately skips the
5270 	 * unreg_rpi and rpi release because some fabrics send RDP
5271 * requests after logging out from the initiator.
5272 */
5273 if (ndlp->nlp_type & NLP_FABRIC &&
5274 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
5275 goto out;
5276
5277 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
5278 /* If PLOGI is being retried, PLOGI completion will cleanup the
5279 * node. The NLP_NPR_2B_DISC flag needs to be retained to make
5280 * progress on nodes discovered from last RSCN.
5281 */
5282 if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
5283 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
5284 goto out;
5285
5286 if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
5287 lpfc_unreg_rpi(vport, ndlp);
5288
5289 }
5290 out:
5291 /*
5292 * The driver received a LOGO from the rport and has ACK'd it.
5293 * At this point, the driver is done so release the IOCB
5294 */
5295 lpfc_els_free_iocb(phba, cmdiocb);
5296 lpfc_nlp_put(ndlp);
5297 }
5298
5299 /**
5300 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
5301 * @phba: pointer to lpfc hba data structure.
5302 * @pmb: pointer to the driver internal queue element for mailbox command.
5303 *
5304 * This routine is the completion callback function for unregister default
5305 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
5306 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
5307 * decrements the ndlp reference count held for this completion callback
5308 * function. After that, it invokes the lpfc_drop_node to check
5309 * whether it is appropriate to release the node.
5310 **/
5311 void
5312 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5313 {
5314 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
5315 u32 mbx_flag = pmb->mbox_flag;
5316 u32 mbx_cmd = pmb->u.mb.mbxCommand;
5317
5318 if (ndlp) {
5319 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5320 "0006 rpi x%x DID:%x flg:%x %d x%px "
5321 "mbx_cmd x%x mbx_flag x%x x%px\n",
5322 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5323 kref_read(&ndlp->kref), ndlp, mbx_cmd,
5324 mbx_flag, pmb);
5325
5326 /* This ends the default/temporary RPI cleanup logic for this
5327 * ndlp and the node and rpi needs to be released. Free the rpi
5328 * first on an UNREG_LOGIN and then release the final
5329 * references.
5330 */
5331 spin_lock_irq(&ndlp->lock);
5332 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5333 if (mbx_cmd == MBX_UNREG_LOGIN)
5334 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5335 spin_unlock_irq(&ndlp->lock);
5336 lpfc_nlp_put(ndlp);
5337 lpfc_drop_node(ndlp->vport, ndlp);
5338 }
5339
5340 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5341 }
5342
5343 /**
5344 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
5345 * @phba: pointer to lpfc hba data structure.
5346 * @cmdiocb: pointer to lpfc command iocb data structure.
5347 * @rspiocb: pointer to lpfc response iocb data structure.
5348 *
5349 * This routine is the completion callback function for ELS Response IOCB
5350  * command. In the normal case, this callback function just sets the
5351  * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
5352  * field in the command IOCB is not NULL, the referenced mailbox command is
5353  * sent out. It then invokes the lpfc_els_free_iocb() routine to release
5354  * the IOCB.
5355 **/
5356 static void
5357 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5358 struct lpfc_iocbq *rspiocb)
5359 {
5360 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5361 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
5362 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
5363 IOCB_t *irsp;
5364 LPFC_MBOXQ_t *mbox = NULL;
5365 u32 ulp_status, ulp_word4, tmo, did, iotag;
5366
5367 if (!vport) {
5368 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5369 "3177 ELS response failed\n");
5370 goto out;
5371 }
5372 if (cmdiocb->context_un.mbox)
5373 mbox = cmdiocb->context_un.mbox;
5374
5375 ulp_status = get_job_ulpstatus(phba, rspiocb);
5376 ulp_word4 = get_job_word4(phba, rspiocb);
5377 did = get_job_els_rsp64_did(phba, cmdiocb);
5378
5379 if (phba->sli_rev == LPFC_SLI_REV4) {
5380 tmo = get_wqe_tmo(cmdiocb);
5381 iotag = get_wqe_reqtag(cmdiocb);
5382 } else {
5383 irsp = &rspiocb->iocb;
5384 tmo = irsp->ulpTimeout;
5385 iotag = irsp->ulpIoTag;
5386 }
5387
5388 /* Check to see if link went down during discovery */
5389 if (!ndlp || lpfc_els_chk_latt(vport)) {
5390 if (mbox)
5391 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5392 goto out;
5393 }
5394
5395 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5396 "ELS rsp cmpl: status:x%x/x%x did:x%x",
5397 ulp_status, ulp_word4, did);
5398 /* ELS response tag <ulpIoTag> completes */
5399 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5400 "0110 ELS response tag x%x completes "
5401 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
5402 iotag, ulp_status, ulp_word4, tmo,
5403 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5404 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
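	/* A mailbox command attached to this response (e.g. REG_LOGIN from a
	 * PLOGI ACC) is only issued when the ELS response completed cleanly
	 * and the node expects the registration; on any other outcome the
	 * mailbox resources are simply released below.
	 */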
5405 if (mbox) {
5406 if (ulp_status == 0
5407 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
5408 if (!lpfc_unreg_rpi(vport, ndlp) &&
5409 (!(vport->fc_flag & FC_PT2PT))) {
5410 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5411 ndlp->nlp_state ==
5412 NLP_STE_REG_LOGIN_ISSUE) {
5413 lpfc_printf_vlog(vport, KERN_INFO,
5414 LOG_DISCOVERY,
5415 "0314 PLOGI recov "
5416 "DID x%x "
5417 "Data: x%x x%x x%x\n",
5418 ndlp->nlp_DID,
5419 ndlp->nlp_state,
5420 ndlp->nlp_rpi,
5421 ndlp->nlp_flag);
5422 goto out_free_mbox;
5423 }
5424 }
5425
5426 /* Increment reference count to ndlp to hold the
5427 * reference to ndlp for the callback function.
5428 */
5429 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5430 if (!mbox->ctx_ndlp)
5431 goto out_free_mbox;
5432
5433 mbox->vport = vport;
5434 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
5435 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5436 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
5437 }
5438 else {
5439 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
5440 ndlp->nlp_prev_state = ndlp->nlp_state;
5441 lpfc_nlp_set_state(vport, ndlp,
5442 NLP_STE_REG_LOGIN_ISSUE);
5443 }
5444
5445 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
5446 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5447 != MBX_NOT_FINISHED)
5448 goto out;
5449
5450 /* Decrement the ndlp reference count we
5451 * set for this failed mailbox command.
5452 */
5453 lpfc_nlp_put(ndlp);
5454 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5455
5456 /* ELS rsp: Cannot issue reg_login for <NPortid> */
5457 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5458 "0138 ELS rsp: Cannot issue reg_login for x%x "
5459 "Data: x%x x%x x%x\n",
5460 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5461 ndlp->nlp_rpi);
5462 }
5463 out_free_mbox:
5464 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5465 }
5466 out:
5467 if (ndlp && shost) {
5468 spin_lock_irq(&ndlp->lock);
5469 if (mbox)
5470 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
5471 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
5472 spin_unlock_irq(&ndlp->lock);
5473 }
5474
5475 /* An SLI4 NPIV instance wants to drop the node at this point under
5476 * these conditions and release the RPI.
5477 */
5478 if (phba->sli_rev == LPFC_SLI_REV4 &&
5479 vport && vport->port_type == LPFC_NPIV_PORT &&
5480 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
5481 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
5482 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
5483 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
5484 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
5485 spin_lock_irq(&ndlp->lock);
5486 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5487 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5488 spin_unlock_irq(&ndlp->lock);
5489 }
5490 lpfc_drop_node(vport, ndlp);
5491 } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
5492 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
5493 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
5494 /* Drop ndlp if there is no planned or outstanding
5495 * issued PRLI.
5496 *
5497 * In cases when the ndlp is acting as both an initiator
5498 * and target function, let our issued PRLI determine
5499 * the final ndlp kref drop.
5500 */
5501 lpfc_drop_node(vport, ndlp);
5502 }
5503 }
5504
5505 /* Release the originating I/O reference. */
5506 lpfc_els_free_iocb(phba, cmdiocb);
5507 lpfc_nlp_put(ndlp);
5508 return;
5509 }
5510
5511 /**
5512 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
5513 * @vport: pointer to a host virtual N_Port data structure.
5514 * @flag: the els command code to be accepted.
5515 * @oldiocb: pointer to the original lpfc command iocb data structure.
5516 * @ndlp: pointer to a node-list data structure.
5517 * @mbox: pointer to the driver internal queue element for mailbox command.
5518 *
5519 * This routine prepares and issues an Accept (ACC) response IOCB
5520 * command. It uses the @flag to properly set up the IOCB field for the
5521 * specific ACC response command to be issued and invokes the
5522 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
5523 * @mbox pointer is passed in, it will be put into the context_un.mbox
5524 * field of the IOCB for the completion callback function to issue the
5525 * mailbox command to the HBA later when callback is invoked.
5526 *
5527 * Note that the ndlp reference count will be incremented by 1 for holding the
5528 * ndlp and the reference to ndlp will be stored into the ndlp field of
5529 * the IOCB for the completion callback function to the corresponding
5530 * response ELS IOCB command.
5531 *
5532 * Return code
5533 * 0 - Successfully issued acc response
5534 * 1 - Failed to issue acc response
5535 **/
5536 int
5537 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
5538 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5539 LPFC_MBOXQ_t *mbox)
5540 {
5541 struct lpfc_hba *phba = vport->phba;
5542 IOCB_t *icmd;
5543 IOCB_t *oldcmd;
5544 union lpfc_wqe128 *wqe;
5545 union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
5546 struct lpfc_iocbq *elsiocb;
5547 uint8_t *pcmd;
5548 struct serv_parm *sp;
5549 uint16_t cmdsize;
5550 int rc;
5551 ELS_PKT *els_pkt_ptr;
5552 struct fc_els_rdf_resp *rdf_resp;
5553
5554 switch (flag) {
5555 case ELS_CMD_ACC:
5556 cmdsize = sizeof(uint32_t);
5557 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5558 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5559 if (!elsiocb) {
5560 spin_lock_irq(&ndlp->lock);
5561 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5562 spin_unlock_irq(&ndlp->lock);
5563 return 1;
5564 }
5565
5566 if (phba->sli_rev == LPFC_SLI_REV4) {
5567 wqe = &elsiocb->wqe;
5568 /* XRI / rx_id */
5569 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5570 bf_get(wqe_ctxt_tag,
5571 &oldwqe->xmit_els_rsp.wqe_com));
5572
5573 /* oxid */
5574 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5575 bf_get(wqe_rcvoxid,
5576 &oldwqe->xmit_els_rsp.wqe_com));
5577 } else {
5578 icmd = &elsiocb->iocb;
5579 oldcmd = &oldiocb->iocb;
5580 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5581 icmd->unsli3.rcvsli3.ox_id =
5582 oldcmd->unsli3.rcvsli3.ox_id;
5583 }
5584
5585 pcmd = elsiocb->cmd_dmabuf->virt;
5586 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5587 pcmd += sizeof(uint32_t);
5588
5589 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5590 "Issue ACC: did:x%x flg:x%x",
5591 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5592 break;
5593 case ELS_CMD_FLOGI:
5594 case ELS_CMD_PLOGI:
5595 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
5596 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5597 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5598 if (!elsiocb)
5599 return 1;
5600
5601 if (phba->sli_rev == LPFC_SLI_REV4) {
5602 wqe = &elsiocb->wqe;
5603 /* XRI / rx_id */
5604 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5605 bf_get(wqe_ctxt_tag,
5606 &oldwqe->xmit_els_rsp.wqe_com));
5607
5608 /* oxid */
5609 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5610 bf_get(wqe_rcvoxid,
5611 &oldwqe->xmit_els_rsp.wqe_com));
5612 } else {
5613 icmd = &elsiocb->iocb;
5614 oldcmd = &oldiocb->iocb;
5615 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5616 icmd->unsli3.rcvsli3.ox_id =
5617 oldcmd->unsli3.rcvsli3.ox_id;
5618 }
5619
5620 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5621
5622 if (mbox)
5623 elsiocb->context_un.mbox = mbox;
5624
5625 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5626 pcmd += sizeof(uint32_t);
5627 sp = (struct serv_parm *)pcmd;
5628
5629 if (flag == ELS_CMD_FLOGI) {
5630 /* Copy the received service parameters back */
5631 memcpy(sp, &phba->fc_fabparam,
5632 sizeof(struct serv_parm));
5633
5634 /* Clear the F_Port bit */
5635 sp->cmn.fPort = 0;
5636
5637 /* Mark all class service parameters as invalid */
5638 sp->cls1.classValid = 0;
5639 sp->cls2.classValid = 0;
5640 sp->cls3.classValid = 0;
5641 sp->cls4.classValid = 0;
5642
5643 /* Copy our worldwide names */
5644 memcpy(&sp->portName, &vport->fc_sparam.portName,
5645 sizeof(struct lpfc_name));
5646 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
5647 sizeof(struct lpfc_name));
5648 } else {
5649 memcpy(pcmd, &vport->fc_sparam,
5650 sizeof(struct serv_parm));
5651
5652 sp->cmn.valid_vendor_ver_level = 0;
5653 memset(sp->un.vendorVersion, 0,
5654 sizeof(sp->un.vendorVersion));
5655 sp->cmn.bbRcvSizeMsb &= 0xF;
5656
5657 /* If our firmware supports this feature, convey that
5658 * info to the target using the vendor specific field.
5659 */
5660 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
5661 sp->cmn.valid_vendor_ver_level = 1;
5662 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
5663 sp->un.vv.flags =
5664 cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
5665 }
5666 }
5667
5668 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5669 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
5670 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5671 break;
5672 case ELS_CMD_PRLO:
5673 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
5674 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5675 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
5676 if (!elsiocb)
5677 return 1;
5678
5679 if (phba->sli_rev == LPFC_SLI_REV4) {
5680 wqe = &elsiocb->wqe;
5681 /* XRI / rx_id */
5682 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5683 bf_get(wqe_ctxt_tag,
5684 &oldwqe->xmit_els_rsp.wqe_com));
5685
5686 /* oxid */
5687 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5688 bf_get(wqe_rcvoxid,
5689 &oldwqe->xmit_els_rsp.wqe_com));
5690 } else {
5691 icmd = &elsiocb->iocb;
5692 oldcmd = &oldiocb->iocb;
5693 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5694 icmd->unsli3.rcvsli3.ox_id =
5695 oldcmd->unsli3.rcvsli3.ox_id;
5696 }
5697
5698 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt;
5699
5700 memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
5701 sizeof(uint32_t) + sizeof(PRLO));
5702 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
5703 els_pkt_ptr = (ELS_PKT *) pcmd;
5704 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
5705
5706 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5707 "Issue ACC PRLO: did:x%x flg:x%x",
5708 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5709 break;
5710 case ELS_CMD_RDF:
5711 cmdsize = sizeof(*rdf_resp);
5712 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5713 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5714 if (!elsiocb)
5715 return 1;
5716
5717 if (phba->sli_rev == LPFC_SLI_REV4) {
5718 wqe = &elsiocb->wqe;
5719 /* XRI / rx_id */
5720 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5721 bf_get(wqe_ctxt_tag,
5722 &oldwqe->xmit_els_rsp.wqe_com));
5723
5724 /* oxid */
5725 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5726 bf_get(wqe_rcvoxid,
5727 &oldwqe->xmit_els_rsp.wqe_com));
5728 } else {
5729 icmd = &elsiocb->iocb;
5730 oldcmd = &oldiocb->iocb;
5731 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5732 icmd->unsli3.rcvsli3.ox_id =
5733 oldcmd->unsli3.rcvsli3.ox_id;
5734 }
5735
5736 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5737 rdf_resp = (struct fc_els_rdf_resp *)pcmd;
5738 memset(rdf_resp, 0, sizeof(*rdf_resp));
5739 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
5740
5741 /* FC-LS-5 specifies desc_list_len shall be set to 12 */
5742 rdf_resp->desc_list_len = cpu_to_be32(12);
5743
5744 /* FC-LS-5 specifies LS REQ Information descriptor */
5745 rdf_resp->lsri.desc_tag = cpu_to_be32(1);
5746 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
5747 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
5748 break;
5749 default:
5750 return 1;
5751 }
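	/* If this ACC answers a LOGO, use the LOGO-specific completion so the
	 * node teardown finishes there; otherwise use the generic ELS
	 * response completion.
	 */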
5752 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
5753 spin_lock_irq(&ndlp->lock);
5754 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5755 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
5756 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5757 spin_unlock_irq(&ndlp->lock);
5758 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
5759 } else {
5760 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5761 }
5762
5763 phba->fc_stat.elsXmitACC++;
5764 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5765 if (!elsiocb->ndlp) {
5766 lpfc_els_free_iocb(phba, elsiocb);
5767 return 1;
5768 }
5769
5770 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5771 if (rc == IOCB_ERROR) {
5772 lpfc_els_free_iocb(phba, elsiocb);
5773 lpfc_nlp_put(ndlp);
5774 return 1;
5775 }
5776
5777 /* Xmit ELS ACC response tag <ulpIoTag> */
5778 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5779 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5780 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5781 "RPI: x%x, fc_flag x%x refcnt %d\n",
5782 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5783 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5784 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
5785 return 0;
5786 }
5787
5788 /**
5789 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
5790 * @vport: pointer to a virtual N_Port data structure.
5791 * @rejectError: reject response to issue
5792 * @oldiocb: pointer to the original lpfc command iocb data structure.
5793 * @ndlp: pointer to a node-list data structure.
5794 * @mbox: pointer to the driver internal queue element for mailbox command.
5795 *
5796  * This routine prepares and issues a Reject (RJT) response IOCB
5797  * command. If a @mbox pointer is passed in, it will be put into the
5798  * context_un.mbox field of the IOCB for the completion callback function
5799  * to issue the mailbox command to the HBA later.
5800 *
5801 * Note that the ndlp reference count will be incremented by 1 for holding the
5802 * ndlp and the reference to ndlp will be stored into the ndlp field of
5803 * the IOCB for the completion callback function to the reject response
5804 * ELS IOCB command.
5805 *
5806 * Return code
5807 * 0 - Successfully issued reject response
5808 * 1 - Failed to issue reject response
5809 **/
5810 int
5811 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
5812 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5813 LPFC_MBOXQ_t *mbox)
5814 {
5815 int rc;
5816 struct lpfc_hba *phba = vport->phba;
5817 IOCB_t *icmd;
5818 IOCB_t *oldcmd;
5819 union lpfc_wqe128 *wqe;
5820 struct lpfc_iocbq *elsiocb;
5821 uint8_t *pcmd;
5822 uint16_t cmdsize;
5823
5824 cmdsize = 2 * sizeof(uint32_t);
5825 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5826 ndlp->nlp_DID, ELS_CMD_LS_RJT);
5827 if (!elsiocb)
5828 return 1;
5829
5830 if (phba->sli_rev == LPFC_SLI_REV4) {
5831 wqe = &elsiocb->wqe;
5832 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5833 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
5834 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5835 get_job_rcvoxid(phba, oldiocb));
5836 } else {
5837 icmd = &elsiocb->iocb;
5838 oldcmd = &oldiocb->iocb;
5839 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5840 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5841 }
5842
5843 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
5844
5845 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5846 pcmd += sizeof(uint32_t);
5847 *((uint32_t *) (pcmd)) = rejectError;
5848
5849 if (mbox)
5850 elsiocb->context_un.mbox = mbox;
5851
5852 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
5853 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5854 "0129 Xmit ELS RJT x%x response tag x%x "
5855 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5856 "rpi x%x\n",
5857 rejectError, elsiocb->iotag,
5858 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
5859 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
5860 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5861 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
5862 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
5863
5864 phba->fc_stat.elsXmitLSRJT++;
5865 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5866 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5867 if (!elsiocb->ndlp) {
5868 lpfc_els_free_iocb(phba, elsiocb);
5869 return 1;
5870 }
5871
5872 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
5873 * node's assigned RPI gets released provided this node is not already
5874 * registered with the transport.
5875 */
5876 if (phba->sli_rev == LPFC_SLI_REV4 &&
5877 vport->port_type == LPFC_NPIV_PORT &&
5878 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
5879 spin_lock_irq(&ndlp->lock);
5880 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5881 spin_unlock_irq(&ndlp->lock);
5882 }
5883
5884 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5885 if (rc == IOCB_ERROR) {
5886 lpfc_els_free_iocb(phba, elsiocb);
5887 lpfc_nlp_put(ndlp);
5888 return 1;
5889 }
5890
5891 return 0;
5892 }
5893
5894 /**
5895 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric.
5896 * @vport: pointer to a host virtual N_Port data structure.
5897 * @cmdiocb: pointer to the original lpfc command iocb data structure.
5898 * @ndlp: NPort to where rsp is directed
5899 *
5900 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate
5901 * this N_Port's support of hardware signals in its Congestion
5902 * Capabilities Descriptor.
5903 *
5904 * Return code
5905 * 0 - Successfully issued edc rsp command
5906 * 1 - Failed to issue edc rsp command
5907 **/
5908 static int
5909 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5910 struct lpfc_nodelist *ndlp)
5911 {
5912 struct lpfc_hba *phba = vport->phba;
5913 struct fc_els_edc_resp *edc_rsp;
5914 struct fc_tlv_desc *tlv;
5915 struct lpfc_iocbq *elsiocb;
5916 IOCB_t *icmd, *cmd;
5917 union lpfc_wqe128 *wqe;
5918 u32 cgn_desc_size, lft_desc_size;
5919 u16 cmdsize;
5920 uint8_t *pcmd;
5921 int rc;
5922
5923 cmdsize = sizeof(struct fc_els_edc_resp);
5924 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
5925 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
5926 sizeof(struct fc_diag_lnkflt_desc) : 0;
5927 cmdsize += cgn_desc_size + lft_desc_size;
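	/* The ACC payload is the LS_ACC header with an LS Request Information
	 * descriptor, followed by a Congestion Signaling descriptor and, when
	 * lpfc_link_is_lds_capable() reports support, a Link Fault descriptor.
	 */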
5928 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
5929 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5930 if (!elsiocb)
5931 return 1;
5932
5933 if (phba->sli_rev == LPFC_SLI_REV4) {
5934 wqe = &elsiocb->wqe;
5935 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5936 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
5937 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5938 get_job_rcvoxid(phba, cmdiocb));
5939 } else {
5940 icmd = &elsiocb->iocb;
5941 cmd = &cmdiocb->iocb;
5942 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
5943 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
5944 }
5945
5946 pcmd = elsiocb->cmd_dmabuf->virt;
5947 memset(pcmd, 0, cmdsize);
5948
5949 edc_rsp = (struct fc_els_edc_resp *)pcmd;
5950 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
5951 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
5952 cgn_desc_size + lft_desc_size);
5953 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
5954 edc_rsp->lsri.desc_len = cpu_to_be32(
5955 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
5956 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
5957 tlv = edc_rsp->desc;
5958 lpfc_format_edc_cgn_desc(phba, tlv);
5959 tlv = fc_tlv_next_desc(tlv);
5960 if (lft_desc_size)
5961 lpfc_format_edc_lft_desc(phba, tlv);
5962
5963 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5964 "Issue EDC ACC: did:x%x flg:x%x refcnt %d",
5965 ndlp->nlp_DID, ndlp->nlp_flag,
5966 kref_read(&ndlp->kref));
5967 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5968
5969 phba->fc_stat.elsXmitACC++;
5970 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5971 if (!elsiocb->ndlp) {
5972 lpfc_els_free_iocb(phba, elsiocb);
5973 return 1;
5974 }
5975
5976 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5977 if (rc == IOCB_ERROR) {
5978 lpfc_els_free_iocb(phba, elsiocb);
5979 lpfc_nlp_put(ndlp);
5980 return 1;
5981 }
5982
5983 /* Xmit ELS ACC response tag <ulpIoTag> */
5984 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5985 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
5986 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5987 "RPI: x%x, fc_flag x%x\n",
5988 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5989 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5990 ndlp->nlp_rpi, vport->fc_flag);
5991
5992 return 0;
5993 }
5994
5995 /**
5996 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
5997 * @vport: pointer to a virtual N_Port data structure.
5998 * @oldiocb: pointer to the original lpfc command iocb data structure.
5999 * @ndlp: pointer to a node-list data structure.
6000 *
6001 * This routine prepares and issues an Accept (ACC) response to Address
6002 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
6003 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
6004 *
6005 * Note that the ndlp reference count will be incremented by 1 for holding the
6006 * ndlp and the reference to ndlp will be stored into the ndlp field of
6007 * the IOCB for the completion callback function to the ADISC Accept response
6008 * ELS IOCB command.
6009 *
6010 * Return code
6011 * 0 - Successfully issued acc adisc response
6012 * 1 - Failed to issue adisc acc response
6013 **/
6014 int
6015 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6016 struct lpfc_nodelist *ndlp)
6017 {
6018 struct lpfc_hba *phba = vport->phba;
6019 ADISC *ap;
6020 IOCB_t *icmd, *oldcmd;
6021 union lpfc_wqe128 *wqe;
6022 struct lpfc_iocbq *elsiocb;
6023 uint8_t *pcmd;
6024 uint16_t cmdsize;
6025 int rc;
6026 u32 ulp_context;
6027
6028 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
6029 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6030 ndlp->nlp_DID, ELS_CMD_ACC);
6031 if (!elsiocb)
6032 return 1;
6033
6034 if (phba->sli_rev == LPFC_SLI_REV4) {
6035 wqe = &elsiocb->wqe;
6036 /* XRI / rx_id */
6037 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6038 get_job_ulpcontext(phba, oldiocb));
6039 ulp_context = get_job_ulpcontext(phba, elsiocb);
6040 /* oxid */
6041 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6042 get_job_rcvoxid(phba, oldiocb));
6043 } else {
6044 icmd = &elsiocb->iocb;
6045 oldcmd = &oldiocb->iocb;
6046 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6047 ulp_context = elsiocb->iocb.ulpContext;
6048 icmd->unsli3.rcvsli3.ox_id =
6049 oldcmd->unsli3.rcvsli3.ox_id;
6050 }
6051
6052 /* Xmit ADISC ACC response tag <ulpIoTag> */
6053 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6054 "0130 Xmit ADISC ACC response iotag x%x xri: "
6055 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
6056 elsiocb->iotag, ulp_context,
6057 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6058 ndlp->nlp_rpi);
6059 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6060
6061 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6062 pcmd += sizeof(uint32_t);
6063
6064 ap = (ADISC *) (pcmd);
6065 ap->hardAL_PA = phba->fc_pref_ALPA;
6066 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
6067 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
6068 ap->DID = be32_to_cpu(vport->fc_myDID);
6069
6070 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6071 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
6072 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6073
6074 phba->fc_stat.elsXmitACC++;
6075 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6076 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6077 if (!elsiocb->ndlp) {
6078 lpfc_els_free_iocb(phba, elsiocb);
6079 return 1;
6080 }
6081
6082 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6083 if (rc == IOCB_ERROR) {
6084 lpfc_els_free_iocb(phba, elsiocb);
6085 lpfc_nlp_put(ndlp);
6086 return 1;
6087 }
6088
6089 return 0;
6090 }
6091
6092 /**
6093 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
6094 * @vport: pointer to a virtual N_Port data structure.
6095 * @oldiocb: pointer to the original lpfc command iocb data structure.
6096 * @ndlp: pointer to a node-list data structure.
6097 *
6098 * This routine prepares and issues an Accept (ACC) response to Process
6099 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
6100 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
6101 *
6102 * Note that the ndlp reference count will be incremented by 1 for holding the
6103 * ndlp and the reference to ndlp will be stored into the ndlp field of
6104 * the IOCB for the completion callback function to the PRLI Accept response
6105 * ELS IOCB command.
6106 *
6107 * Return code
6108 * 0 - Successfully issued acc prli response
6109 * 1 - Failed to issue acc prli response
6110 **/
6111 int
6112 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6113 struct lpfc_nodelist *ndlp)
6114 {
6115 struct lpfc_hba *phba = vport->phba;
6116 PRLI *npr;
6117 struct lpfc_nvme_prli *npr_nvme;
6118 lpfc_vpd_t *vpd;
6119 IOCB_t *icmd;
6120 IOCB_t *oldcmd;
6121 union lpfc_wqe128 *wqe;
6122 struct lpfc_iocbq *elsiocb;
6123 uint8_t *pcmd;
6124 uint16_t cmdsize;
6125 uint32_t prli_fc4_req, *req_payload;
6126 struct lpfc_dmabuf *req_buf;
6127 int rc;
6128 u32 elsrspcmd, ulp_context;
6129
6130 /* Need the incoming PRLI payload to determine if the ACC is for an
6131 * FC4 or NVME PRLI type. The PRLI type is at word 1.
6132 */
6133 req_buf = oldiocb->cmd_dmabuf;
6134 req_payload = (((uint32_t *)req_buf->virt) + 1);
6135
6136 /* PRLI type payload is at byte 3 for FCP or NVME. */
6137 prli_fc4_req = be32_to_cpu(*req_payload);
6138 prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
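	/* For illustration: a PRLI word1 of 0x08002000 on the wire yields
	 * prli_fc4_req = 0x08 (PRLI_FCP_TYPE); a leading byte of 0x28 would
	 * select PRLI_NVME_TYPE instead.
	 */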
6139 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6140 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
6141 prli_fc4_req, *((uint32_t *)req_payload));
6142
6143 if (prli_fc4_req == PRLI_FCP_TYPE) {
6144 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
6145 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
6146 } else if (prli_fc4_req == PRLI_NVME_TYPE) {
6147 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
6148 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
6149 } else {
6150 return 1;
6151 }
6152
6153 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6154 ndlp->nlp_DID, elsrspcmd);
6155 if (!elsiocb)
6156 return 1;
6157
6158 if (phba->sli_rev == LPFC_SLI_REV4) {
6159 wqe = &elsiocb->wqe;
6160 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6161 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6162 ulp_context = get_job_ulpcontext(phba, elsiocb);
6163 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6164 get_job_rcvoxid(phba, oldiocb));
6165 } else {
6166 icmd = &elsiocb->iocb;
6167 oldcmd = &oldiocb->iocb;
6168 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6169 ulp_context = elsiocb->iocb.ulpContext;
6170 icmd->unsli3.rcvsli3.ox_id =
6171 oldcmd->unsli3.rcvsli3.ox_id;
6172 }
6173
6174 /* Xmit PRLI ACC response tag <ulpIoTag> */
6175 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6176 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
6177 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6178 elsiocb->iotag, ulp_context,
6179 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6180 ndlp->nlp_rpi);
6181 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6182 memset(pcmd, 0, cmdsize);
6183
6184 *((uint32_t *)(pcmd)) = elsrspcmd;
6185 pcmd += sizeof(uint32_t);
6186
6187 /* For PRLI, remainder of payload is PRLI parameter page */
6188 vpd = &phba->vpd;
6189
6190 if (prli_fc4_req == PRLI_FCP_TYPE) {
6191 /*
6192 * If the remote port is a target and our firmware version
6193 * is 3.20 or later, set the following bits for FC-TAPE
6194 * support.
6195 */
6196 npr = (PRLI *) pcmd;
6197 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
6198 (vpd->rev.feaLevelHigh >= 0x02)) {
6199 npr->ConfmComplAllowed = 1;
6200 npr->Retry = 1;
6201 npr->TaskRetryIdReq = 1;
6202 }
6203 npr->acceptRspCode = PRLI_REQ_EXECUTED;
6204
6205 /* Set image pair for complementary pairs only. */
6206 if (ndlp->nlp_type & NLP_FCP_TARGET)
6207 npr->estabImagePair = 1;
6208 else
6209 npr->estabImagePair = 0;
6210 npr->readXferRdyDis = 1;
6211 npr->ConfmComplAllowed = 1;
6212 npr->prliType = PRLI_FCP_TYPE;
6213 npr->initiatorFunc = 1;
6214
6215 /* Xmit PRLI ACC response tag <ulpIoTag> */
6216 lpfc_printf_vlog(vport, KERN_INFO,
6217 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
6218 "6014 FCP issue PRLI ACC imgpair %d "
6219 "retry %d task %d\n",
6220 npr->estabImagePair,
6221 npr->Retry, npr->TaskRetryIdReq);
6222
6223 } else if (prli_fc4_req == PRLI_NVME_TYPE) {
6224 /* Respond with an NVME PRLI Type */
6225 npr_nvme = (struct lpfc_nvme_prli *) pcmd;
6226 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
6227 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
6228 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
6229 if (phba->nvmet_support) {
6230 bf_set(prli_tgt, npr_nvme, 1);
6231 bf_set(prli_disc, npr_nvme, 1);
6232 if (phba->cfg_nvme_enable_fb) {
6233 bf_set(prli_fba, npr_nvme, 1);
6234
6235 /* TBD. Target mode needs to post buffers
6236 * that support the configured first burst
6237 * byte size.
6238 */
6239 bf_set(prli_fb_sz, npr_nvme,
6240 phba->cfg_nvmet_fb_size);
6241 }
6242 } else {
6243 bf_set(prli_init, npr_nvme, 1);
6244 }
6245
6246 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
6247 "6015 NVME issue PRLI ACC word1 x%08x "
6248 "word4 x%08x word5 x%08x flag x%x, "
6249 "fcp_info x%x nlp_type x%x\n",
6250 npr_nvme->word1, npr_nvme->word4,
6251 npr_nvme->word5, ndlp->nlp_flag,
6252 ndlp->nlp_fcp_info, ndlp->nlp_type);
6253 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
6254 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
6255 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
6256 } else
6257 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6258 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
6259 prli_fc4_req, ndlp->nlp_fc4_type,
6260 ndlp->nlp_DID);
6261
6262 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6263 "Issue ACC PRLI: did:x%x flg:x%x",
6264 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6265
6266 phba->fc_stat.elsXmitACC++;
6267 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6268 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6269 if (!elsiocb->ndlp) {
6270 lpfc_els_free_iocb(phba, elsiocb);
6271 return 1;
6272 }
6273
6274 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6275 if (rc == IOCB_ERROR) {
6276 lpfc_els_free_iocb(phba, elsiocb);
6277 lpfc_nlp_put(ndlp);
6278 return 1;
6279 }
6280
6281 return 0;
6282 }
6283
6284 /**
6285 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
6286 * @vport: pointer to a virtual N_Port data structure.
6287 * @format: rnid command format.
6288 * @oldiocb: pointer to the original lpfc command iocb data structure.
6289 * @ndlp: pointer to a node-list data structure.
6290 *
6291 * This routine issues a Request Node Identification Data (RNID) Accept
6292 * (ACC) response. It constructs the RNID ACC response command according to
6293 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
6294 * issue the response.
6295 *
6296 * Note that the ndlp reference count will be incremented by 1 for holding the
6297 * ndlp and the reference to ndlp will be stored into the ndlp field of
6298 * the IOCB for the completion callback function.
6299 *
6300 * Return code
6301 * 0 - Successfully issued acc rnid response
6302 * 1 - Failed to issue acc rnid response
6303 **/
6304 static int
6305 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
6306 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6307 {
6308 struct lpfc_hba *phba = vport->phba;
6309 RNID *rn;
6310 IOCB_t *icmd, *oldcmd;
6311 union lpfc_wqe128 *wqe;
6312 struct lpfc_iocbq *elsiocb;
6313 uint8_t *pcmd;
6314 uint16_t cmdsize;
6315 int rc;
6316 u32 ulp_context;
6317
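	/* The common identification block (port and node names) is always
	 * returned; a topology discovery block is appended only when a
	 * non-zero format is requested.
	 */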
6318 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
6319 + (2 * sizeof(struct lpfc_name));
6320 if (format)
6321 cmdsize += sizeof(RNID_TOP_DISC);
6322
6323 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6324 ndlp->nlp_DID, ELS_CMD_ACC);
6325 if (!elsiocb)
6326 return 1;
6327
6328 if (phba->sli_rev == LPFC_SLI_REV4) {
6329 wqe = &elsiocb->wqe;
6330 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6331 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6332 ulp_context = get_job_ulpcontext(phba, elsiocb);
6333 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6334 get_job_rcvoxid(phba, oldiocb));
6335 } else {
6336 icmd = &elsiocb->iocb;
6337 oldcmd = &oldiocb->iocb;
6338 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6339 ulp_context = elsiocb->iocb.ulpContext;
6340 icmd->unsli3.rcvsli3.ox_id =
6341 oldcmd->unsli3.rcvsli3.ox_id;
6342 }
6343
6344 /* Xmit RNID ACC response tag <ulpIoTag> */
6345 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6346 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
6347 elsiocb->iotag, ulp_context);
6348 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6349 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6350 pcmd += sizeof(uint32_t);
6351
6352 memset(pcmd, 0, sizeof(RNID));
6353 rn = (RNID *) (pcmd);
6354 rn->Format = format;
6355 rn->CommonLen = (2 * sizeof(struct lpfc_name));
6356 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
6357 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
6358 switch (format) {
6359 case 0:
6360 rn->SpecificLen = 0;
6361 break;
6362 case RNID_TOPOLOGY_DISC:
6363 rn->SpecificLen = sizeof(RNID_TOP_DISC);
6364 memcpy(&rn->un.topologyDisc.portName,
6365 &vport->fc_portname, sizeof(struct lpfc_name));
6366 rn->un.topologyDisc.unitType = RNID_HBA;
6367 rn->un.topologyDisc.physPort = 0;
6368 rn->un.topologyDisc.attachedNodes = 0;
6369 break;
6370 default:
6371 rn->CommonLen = 0;
6372 rn->SpecificLen = 0;
6373 break;
6374 }
6375
6376 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6377 "Issue ACC RNID: did:x%x flg:x%x refcnt %d",
6378 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6379
6380 phba->fc_stat.elsXmitACC++;
6381 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6382 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6383 if (!elsiocb->ndlp) {
6384 lpfc_els_free_iocb(phba, elsiocb);
6385 return 1;
6386 }
6387
6388 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6389 if (rc == IOCB_ERROR) {
6390 lpfc_els_free_iocb(phba, elsiocb);
6391 lpfc_nlp_put(ndlp);
6392 return 1;
6393 }
6394
6395 return 0;
6396 }
6397
6398 /**
6399  * lpfc_els_clear_rrq - Clear the exchange that this RRQ describes
6400 * @vport: pointer to a virtual N_Port data structure.
6401 * @iocb: pointer to the lpfc command iocb data structure.
6402 * @ndlp: pointer to a node-list data structure.
6403 *
6404  * Return: none
6405 **/
6406 static void
6407 lpfc_els_clear_rrq(struct lpfc_vport *vport,
6408 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
6409 {
6410 struct lpfc_hba *phba = vport->phba;
6411 uint8_t *pcmd;
6412 struct RRQ *rrq;
6413 uint16_t rxid;
6414 uint16_t xri;
6415 struct lpfc_node_rrq *prrq;
6416
6417
6418 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
6419 pcmd += sizeof(uint32_t);
6420 rrq = (struct RRQ *)pcmd;
6421 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
6422 rxid = bf_get(rrq_rxid, rrq);
6423
6424 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6425 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
6426 " x%x x%x\n",
6427 be32_to_cpu(bf_get(rrq_did, rrq)),
6428 bf_get(rrq_oxid, rrq),
6429 rxid,
6430 get_wqe_reqtag(iocb),
6431 get_job_ulpcontext(phba, iocb));
6432
6433 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6434 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
6435 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
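	/* If the RRQ carries this port's DID, the OX_ID names the local
	 * exchange; otherwise the RX_ID identifies it on this port.
	 */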
6436 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
6437 xri = bf_get(rrq_oxid, rrq);
6438 else
6439 xri = rxid;
6440 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
6441 if (prrq)
6442 lpfc_clr_rrq_active(phba, xri, prrq);
6443 return;
6444 }
6445
6446 /**
6447 * lpfc_els_rsp_echo_acc - Issue echo acc response
6448 * @vport: pointer to a virtual N_Port data structure.
6449 * @data: pointer to echo data to return in the accept.
6450 * @oldiocb: pointer to the original lpfc command iocb data structure.
6451 * @ndlp: pointer to a node-list data structure.
6452 *
6453 * Return code
6454 * 0 - Successfully issued acc echo response
6455 * 1 - Failed to issue acc echo response
6456 **/
6457 static int
6458 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
6459 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6460 {
6461 struct lpfc_hba *phba = vport->phba;
6462 IOCB_t *icmd, *oldcmd;
6463 union lpfc_wqe128 *wqe;
6464 struct lpfc_iocbq *elsiocb;
6465 uint8_t *pcmd;
6466 uint16_t cmdsize;
6467 int rc;
6468 u32 ulp_context;
6469
6470 if (phba->sli_rev == LPFC_SLI_REV4)
6471 cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
6472 else
6473 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
6474
6475 /* The accumulated length can exceed the BPL_SIZE. For
6476 * now, use this as the limit
6477 */
6478 if (cmdsize > LPFC_BPL_SIZE)
6479 cmdsize = LPFC_BPL_SIZE;
6480 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6481 ndlp->nlp_DID, ELS_CMD_ACC);
6482 if (!elsiocb)
6483 return 1;
6484
6485 if (phba->sli_rev == LPFC_SLI_REV4) {
6486 wqe = &elsiocb->wqe;
6487 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6488 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6489 ulp_context = get_job_ulpcontext(phba, elsiocb);
6490 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6491 get_job_rcvoxid(phba, oldiocb));
6492 } else {
6493 icmd = &elsiocb->iocb;
6494 oldcmd = &oldiocb->iocb;
6495 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6496 ulp_context = elsiocb->iocb.ulpContext;
6497 icmd->unsli3.rcvsli3.ox_id =
6498 oldcmd->unsli3.rcvsli3.ox_id;
6499 }
6500
6501 /* Xmit ECHO ACC response tag <ulpIoTag> */
6502 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6503 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6504 elsiocb->iotag, ulp_context);
6505 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6506 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6507 pcmd += sizeof(uint32_t);
6508 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
6509
6510 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6511 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
6512 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6513
6514 phba->fc_stat.elsXmitACC++;
6515 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6516 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6517 if (!elsiocb->ndlp) {
6518 lpfc_els_free_iocb(phba, elsiocb);
6519 return 1;
6520 }
6521
6522 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6523 if (rc == IOCB_ERROR) {
6524 lpfc_els_free_iocb(phba, elsiocb);
6525 lpfc_nlp_put(ndlp);
6526 return 1;
6527 }
6528
6529 return 0;
6530 }
6531
6532 /**
6533 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
6534 * @vport: pointer to a host virtual N_Port data structure.
6535 *
6536 * This routine issues Address Discover (ADISC) ELS commands to those
6537 * N_Ports which are in node port recovery state and ADISC has not been issued
6538 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
6539 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
6540  * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches the
6541  * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
6542  * set in the @vport fc_flag and the issuing of the remaining ADISC IOCBs
6543  * stops, to be picked up later. On the other hand, if the walk through all
6544  * the ndlps on the @vport issues no ADISC IOCB, the FC_NLP_MORE bit is
6545  * cleared in the @vport fc_flag, indicating there are no more ADISCs that
6546  * need to be sent.
6547 *
6548 * Return code
6549 * The number of N_Ports with adisc issued.
6550 **/
6551 int
6552 lpfc_els_disc_adisc(struct lpfc_vport *vport)
6553 {
6554 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6555 struct lpfc_nodelist *ndlp, *next_ndlp;
6556 int sentadisc = 0;
6557
6558 /* go thru NPR nodes and issue any remaining ELS ADISCs */
6559 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6560
6561 if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
6562 !(ndlp->nlp_flag & NLP_NPR_ADISC))
6563 continue;
6564
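/* Clear the ADISC-pending flag under the node lock before deciding
 * whether this node is actually part of the current discovery pass.
 */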
6565 spin_lock_irq(&ndlp->lock);
6566 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
6567 spin_unlock_irq(&ndlp->lock);
6568
6569 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
6570 /* This node was marked for ADISC but was not picked
6571 * for discovery. This is possible if the node was
6572 * missing in gidft response.
6573 *
6574 * At time of marking node for ADISC, we skipped unreg
6575 * from backend
6576 */
6577 lpfc_nlp_unreg_node(vport, ndlp);
6578 lpfc_unreg_rpi(vport, ndlp);
6579 continue;
6580 }
6581
6582 ndlp->nlp_prev_state = ndlp->nlp_state;
6583 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6584 lpfc_issue_els_adisc(vport, ndlp, 0);
6585 sentadisc++;
6586 vport->num_disc_nodes++;
6587 if (vport->num_disc_nodes >=
6588 vport->cfg_discovery_threads) {
6589 spin_lock_irq(shost->host_lock);
6590 vport->fc_flag |= FC_NLP_MORE;
6591 spin_unlock_irq(shost->host_lock);
6592 break;
6593 }
6594
6595 }
6596 if (sentadisc == 0) {
6597 spin_lock_irq(shost->host_lock);
6598 vport->fc_flag &= ~FC_NLP_MORE;
6599 spin_unlock_irq(shost->host_lock);
6600 }
6601 return sentadisc;
6602 }
6603
6604 /**
6605 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
6606 * @vport: pointer to a host virtual N_Port data structure.
6607 *
6608 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
6609  * which are in node port recovery state on a @vport. Each time an ELS
6610  * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
6611  * the per-@vport discovery count (num_disc_nodes) is incremented. If
6612  * num_disc_nodes reaches the pre-configured threshold
6613  * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
6614  * fc_flag and issuing of the remaining PLOGI IOCBs is deferred for a
6615  * later pass. Conversely, if the walk of the @vport ndlps completes
6616  * without any PLOGI IOCB being issued, the FC_NLP_MORE bit is cleared
6617  * from the @vport fc_flag to indicate that no more PLOGIs need to be
6618  * sent.
6619 *
6620 * Return code
6621 * The number of N_Ports with plogi issued.
6622 **/
6623 int
6624 lpfc_els_disc_plogi(struct lpfc_vport *vport)
6625 {
6626 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6627 struct lpfc_nodelist *ndlp, *next_ndlp;
6628 int sentplogi = 0;
6629
6630 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
6631 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6632 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
6633 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
6634 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
6635 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
6636 ndlp->nlp_prev_state = ndlp->nlp_state;
6637 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6638 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
6639 sentplogi++;
6640 vport->num_disc_nodes++;
6641 if (vport->num_disc_nodes >=
6642 vport->cfg_discovery_threads) {
6643 spin_lock_irq(shost->host_lock);
6644 vport->fc_flag |= FC_NLP_MORE;
6645 spin_unlock_irq(shost->host_lock);
6646 break;
6647 }
6648 }
6649 }
6650
6651 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6652 "6452 Discover PLOGI %d flag x%x\n",
6653 sentplogi, vport->fc_flag);
6654
6655 if (sentplogi) {
6656 lpfc_set_disctmo(vport);
6657 }
6658 else {
6659 spin_lock_irq(shost->host_lock);
6660 vport->fc_flag &= ~FC_NLP_MORE;
6661 spin_unlock_irq(shost->host_lock);
6662 }
6663 return sentplogi;
6664 }
6665
6666 static uint32_t
6667 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
6668 uint32_t word0)
6669 {
6670
6671 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
6672 desc->payload.els_req = word0;
6673 desc->length = cpu_to_be32(sizeof(desc->payload));
6674
6675 return sizeof(struct fc_rdp_link_service_desc);
6676 }
6677
6678 static uint32_t
6679 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
6680 uint8_t *page_a0, uint8_t *page_a2)
6681 {
6682 uint16_t wavelength;
6683 uint16_t temperature;
6684 uint16_t rx_power;
6685 uint16_t tx_bias;
6686 uint16_t tx_power;
6687 uint16_t vcc;
6688 uint16_t flag = 0;
6689 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
6690 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
6691
6692 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
6693
6694 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
6695 &page_a0[SSF_TRANSCEIVER_CODE_B4];
6696 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
6697 &page_a0[SSF_TRANSCEIVER_CODE_B5];
6698
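/* The transceiver compliance code bytes from page A0 identify the laser
 * type: any short-wave bit selects the SW flag, while a long-wave part
 * is further classified by its nominal wavelength (1310nm LC vs
 * 1550nm LL).
 */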
6699 if ((trasn_code_byte4->fc_sw_laser) ||
6700 (trasn_code_byte5->fc_sw_laser_sl) ||
6701 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */
6702 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
6703 } else if (trasn_code_byte4->fc_lw_laser) {
6704 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
6705 page_a0[SSF_WAVELENGTH_B0];
6706 if (wavelength == SFP_WAVELENGTH_LC1310)
6707 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
6708 if (wavelength == SFP_WAVELENGTH_LL1550)
6709 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
6710 }
6711 /* check if its SFP+ */
6712 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
6713 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
6714 << SFP_FLAG_CT_SHIFT;
6715
6716 /* check if its OPTICAL */
6717 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
6718 SFP_FLAG_IS_OPTICAL_PORT : 0)
6719 << SFP_FLAG_IS_OPTICAL_SHIFT;
6720
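/* Diagnostic readings from page A2 are two-byte values; reassemble each
 * from its high/low bytes and store it big-endian in the descriptor.
 */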
6721 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
6722 page_a2[SFF_TEMPERATURE_B0]);
6723 vcc = (page_a2[SFF_VCC_B1] << 8 |
6724 page_a2[SFF_VCC_B0]);
6725 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
6726 page_a2[SFF_TXPOWER_B0]);
6727 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
6728 page_a2[SFF_TX_BIAS_CURRENT_B0]);
6729 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
6730 page_a2[SFF_RXPOWER_B0]);
6731 desc->sfp_info.temperature = cpu_to_be16(temperature);
6732 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
6733 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
6734 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
6735 desc->sfp_info.vcc = cpu_to_be16(vcc);
6736
6737 desc->sfp_info.flags = cpu_to_be16(flag);
6738 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
6739
6740 return sizeof(struct fc_rdp_sfp_desc);
6741 }
6742
6743 static uint32_t
6744 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
6745 READ_LNK_VAR *stat)
6746 {
6747 uint32_t type;
6748
6749 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
6750
6751 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
6752
6753 desc->info.port_type = cpu_to_be32(type);
6754
6755 desc->info.link_status.link_failure_cnt =
6756 cpu_to_be32(stat->linkFailureCnt);
6757 desc->info.link_status.loss_of_synch_cnt =
6758 cpu_to_be32(stat->lossSyncCnt);
6759 desc->info.link_status.loss_of_signal_cnt =
6760 cpu_to_be32(stat->lossSignalCnt);
6761 desc->info.link_status.primitive_seq_proto_err =
6762 cpu_to_be32(stat->primSeqErrCnt);
6763 desc->info.link_status.invalid_trans_word =
6764 cpu_to_be32(stat->invalidXmitWord);
6765 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
6766
6767 desc->length = cpu_to_be32(sizeof(desc->info));
6768
6769 return sizeof(struct fc_rdp_link_error_status_desc);
6770 }
6771
6772 static uint32_t
6773 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
6774 struct lpfc_vport *vport)
6775 {
6776 uint32_t bbCredit;
6777
6778 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
6779
6780 bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
6781 (vport->fc_sparam.cmn.bbCreditMsb << 8);
6782 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
6783 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
6784 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
6785 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
6786 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
6787 } else {
6788 desc->bbc_info.attached_port_bbc = 0;
6789 }
6790
6791 desc->bbc_info.rtt = 0;
6792 desc->length = cpu_to_be32(sizeof(desc->bbc_info));
6793
6794 return sizeof(struct fc_rdp_bbc_desc);
6795 }
6796
6797 static uint32_t
6798 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
6799 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
6800 {
6801 uint32_t flags = 0;
6802
6803 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6804
6805 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
6806 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
6807 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
6808 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
6809
6810 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6811 flags |= RDP_OET_HIGH_ALARM;
6812 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6813 flags |= RDP_OET_LOW_ALARM;
6814 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6815 flags |= RDP_OET_HIGH_WARNING;
6816 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6817 flags |= RDP_OET_LOW_WARNING;
6818
6819 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
6820 desc->oed_info.function_flags = cpu_to_be32(flags);
6821 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6822 return sizeof(struct fc_rdp_oed_sfp_desc);
6823 }
6824
6825 static uint32_t
6826 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
6827 struct fc_rdp_oed_sfp_desc *desc,
6828 uint8_t *page_a2)
6829 {
6830 uint32_t flags = 0;
6831
6832 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6833
6834 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
6835 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
6836 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
6837 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
6838
6839 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6840 flags |= RDP_OET_HIGH_ALARM;
6841 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6842 flags |= RDP_OET_LOW_ALARM;
6843 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6844 flags |= RDP_OET_HIGH_WARNING;
6845 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6846 flags |= RDP_OET_LOW_WARNING;
6847
6848 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
6849 desc->oed_info.function_flags = cpu_to_be32(flags);
6850 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6851 return sizeof(struct fc_rdp_oed_sfp_desc);
6852 }
6853
6854 static uint32_t
6855 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
6856 struct fc_rdp_oed_sfp_desc *desc,
6857 uint8_t *page_a2)
6858 {
6859 uint32_t flags = 0;
6860
6861 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6862
6863 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
6864 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
6865 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
6866 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
6867
6868 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6869 flags |= RDP_OET_HIGH_ALARM;
6870 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
6871 flags |= RDP_OET_LOW_ALARM;
6872 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6873 flags |= RDP_OET_HIGH_WARNING;
6874 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
6875 flags |= RDP_OET_LOW_WARNING;
6876
6877 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
6878 desc->oed_info.function_flags = cpu_to_be32(flags);
6879 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6880 return sizeof(struct fc_rdp_oed_sfp_desc);
6881 }
6882
6883 static uint32_t
6884 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
6885 struct fc_rdp_oed_sfp_desc *desc,
6886 uint8_t *page_a2)
6887 {
6888 uint32_t flags = 0;
6889
6890 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6891
6892 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
6893 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
6894 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
6895 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
6896
6897 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6898 flags |= RDP_OET_HIGH_ALARM;
6899 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
6900 flags |= RDP_OET_LOW_ALARM;
6901 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6902 flags |= RDP_OET_HIGH_WARNING;
6903 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
6904 flags |= RDP_OET_LOW_WARNING;
6905
6906 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
6907 desc->oed_info.function_flags = cpu_to_be32(flags);
6908 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6909 return sizeof(struct fc_rdp_oed_sfp_desc);
6910 }
6911
6912
6913 static uint32_t
6914 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
6915 struct fc_rdp_oed_sfp_desc *desc,
6916 uint8_t *page_a2)
6917 {
6918 uint32_t flags = 0;
6919
6920 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6921
6922 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
6923 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
6924 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
6925 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
6926
6927 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6928 flags |= RDP_OET_HIGH_ALARM;
6929 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
6930 flags |= RDP_OET_LOW_ALARM;
6931 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6932 flags |= RDP_OET_HIGH_WARNING;
6933 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
6934 flags |= RDP_OET_LOW_WARNING;
6935
6936 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
6937 desc->oed_info.function_flags = cpu_to_be32(flags);
6938 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6939 return sizeof(struct fc_rdp_oed_sfp_desc);
6940 }
6941
6942 static uint32_t
6943 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
6944 uint8_t *page_a0, struct lpfc_vport *vport)
6945 {
6946 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
6947 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
6948 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
6949 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
6950 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
6951 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
6952 desc->length = cpu_to_be32(sizeof(desc->opd_info));
6953 return sizeof(struct fc_rdp_opd_sfp_desc);
6954 }
6955
6956 static uint32_t
6957 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
6958 {
6959 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
6960 return 0;
6961 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
6962
6963 desc->info.CorrectedBlocks =
6964 cpu_to_be32(stat->fecCorrBlkCount);
6965 desc->info.UncorrectableBlocks =
6966 cpu_to_be32(stat->fecUncorrBlkCount);
6967
6968 desc->length = cpu_to_be32(sizeof(desc->info));
6969
6970 return sizeof(struct fc_fec_rdp_desc);
6971 }
6972
6973 static uint32_t
6974 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
6975 {
6976 uint16_t rdp_cap = 0;
6977 uint16_t rdp_speed;
6978
6979 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
6980
6981 switch (phba->fc_linkspeed) {
6982 case LPFC_LINK_SPEED_1GHZ:
6983 rdp_speed = RDP_PS_1GB;
6984 break;
6985 case LPFC_LINK_SPEED_2GHZ:
6986 rdp_speed = RDP_PS_2GB;
6987 break;
6988 case LPFC_LINK_SPEED_4GHZ:
6989 rdp_speed = RDP_PS_4GB;
6990 break;
6991 case LPFC_LINK_SPEED_8GHZ:
6992 rdp_speed = RDP_PS_8GB;
6993 break;
6994 case LPFC_LINK_SPEED_10GHZ:
6995 rdp_speed = RDP_PS_10GB;
6996 break;
6997 case LPFC_LINK_SPEED_16GHZ:
6998 rdp_speed = RDP_PS_16GB;
6999 break;
7000 case LPFC_LINK_SPEED_32GHZ:
7001 rdp_speed = RDP_PS_32GB;
7002 break;
7003 case LPFC_LINK_SPEED_64GHZ:
7004 rdp_speed = RDP_PS_64GB;
7005 break;
7006 case LPFC_LINK_SPEED_128GHZ:
7007 rdp_speed = RDP_PS_128GB;
7008 break;
7009 case LPFC_LINK_SPEED_256GHZ:
7010 rdp_speed = RDP_PS_256GB;
7011 break;
7012 default:
7013 rdp_speed = RDP_PS_UNKNOWN;
7014 break;
7015 }
7016
7017 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
7018
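/* Build the capability bitmap from every link speed the adapter
 * supports (phba->lmt bits).
 */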
7019 if (phba->lmt & LMT_256Gb)
7020 rdp_cap |= RDP_PS_256GB;
7021 if (phba->lmt & LMT_128Gb)
7022 rdp_cap |= RDP_PS_128GB;
7023 if (phba->lmt & LMT_64Gb)
7024 rdp_cap |= RDP_PS_64GB;
7025 if (phba->lmt & LMT_32Gb)
7026 rdp_cap |= RDP_PS_32GB;
7027 if (phba->lmt & LMT_16Gb)
7028 rdp_cap |= RDP_PS_16GB;
7029 if (phba->lmt & LMT_10Gb)
7030 rdp_cap |= RDP_PS_10GB;
7031 if (phba->lmt & LMT_8Gb)
7032 rdp_cap |= RDP_PS_8GB;
7033 if (phba->lmt & LMT_4Gb)
7034 rdp_cap |= RDP_PS_4GB;
7035 if (phba->lmt & LMT_2Gb)
7036 rdp_cap |= RDP_PS_2GB;
7037 if (phba->lmt & LMT_1Gb)
7038 rdp_cap |= RDP_PS_1GB;
7039
7040 if (rdp_cap == 0)
7041 rdp_cap = RDP_CAP_UNKNOWN;
7042 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
7043 rdp_cap |= RDP_CAP_USER_CONFIGURED;
7044
7045 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
7046 desc->length = cpu_to_be32(sizeof(desc->info));
7047 return sizeof(struct fc_rdp_port_speed_desc);
7048 }
7049
7050 static uint32_t
7051 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
7052 struct lpfc_vport *vport)
7053 {
7054
7055 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
7056
7057 memcpy(desc->port_names.wwnn, &vport->fc_nodename,
7058 sizeof(desc->port_names.wwnn));
7059
7060 memcpy(desc->port_names.wwpn, &vport->fc_portname,
7061 sizeof(desc->port_names.wwpn));
7062
7063 desc->length = cpu_to_be32(sizeof(desc->port_names));
7064 return sizeof(struct fc_rdp_port_name_desc);
7065 }
7066
7067 static uint32_t
7068 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
7069 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7070 {
7071
7072 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
7073 if (vport->fc_flag & FC_FABRIC) {
7074 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
7075 sizeof(desc->port_names.wwnn));
7076
7077 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
7078 sizeof(desc->port_names.wwpn));
7079 } else { /* Point to Point */
7080 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
7081 sizeof(desc->port_names.wwnn));
7082
7083 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
7084 sizeof(desc->port_names.wwpn));
7085 }
7086
7087 desc->length = cpu_to_be32(sizeof(desc->port_names));
7088 return sizeof(struct fc_rdp_port_name_desc);
7089 }
7090
7091 static void
7092 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
7093 int status)
7094 {
7095 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
7096 struct lpfc_vport *vport = ndlp->vport;
7097 struct lpfc_iocbq *elsiocb;
7098 struct ulp_bde64 *bpl;
7099 IOCB_t *icmd;
7100 union lpfc_wqe128 *wqe;
7101 uint8_t *pcmd;
7102 struct ls_rjt *stat;
7103 struct fc_rdp_res_frame *rdp_res;
7104 uint32_t cmdsize, len;
7105 uint16_t *flag_ptr;
7106 int rc;
7107 u32 ulp_context;
7108
7109 if (status != SUCCESS)
7110 goto error;
7111
7112 /* This will change once we know the true size of the RDP payload */
7113 cmdsize = sizeof(struct fc_rdp_res_frame);
7114
7115 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
7116 lpfc_max_els_tries, rdp_context->ndlp,
7117 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
7118 if (!elsiocb)
7119 goto free_rdp_context;
7120
7121 ulp_context = get_job_ulpcontext(phba, elsiocb);
7122 if (phba->sli_rev == LPFC_SLI_REV4) {
7123 wqe = &elsiocb->wqe;
7124 /* ox-id of the frame */
7125 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7126 rdp_context->ox_id);
7127 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7128 rdp_context->rx_id);
7129 } else {
7130 icmd = &elsiocb->iocb;
7131 icmd->ulpContext = rdp_context->rx_id;
7132 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
7133 }
7134
7135 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7136 "2171 Xmit RDP response tag x%x xri x%x, "
7137 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
7138 elsiocb->iotag, ulp_context,
7139 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7140 ndlp->nlp_rpi);
7141 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
7142 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7143 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
7144 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7145
7146 /* Update Alarm and Warning */
7147 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
7148 phba->sfp_alarm |= *flag_ptr;
7149 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
7150 phba->sfp_warning |= *flag_ptr;
7151
7152 /* For RDP payload */
7153 len = 8;
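/* The first 8 bytes are the ACC command word and the descriptor list
 * length word; each descriptor is appended after them and the length
 * field is later set to len - 8 (descriptors only).
 */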
7154 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
7155 (len + pcmd), ELS_CMD_RDP);
7156
7157 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
7158 rdp_context->page_a0, rdp_context->page_a2);
7159 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
7160 phba);
7161 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
7162 (len + pcmd), &rdp_context->link_stat);
7163 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
7164 (len + pcmd), vport);
7165 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
7166 (len + pcmd), vport, ndlp);
7167 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
7168 &rdp_context->link_stat);
7169 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
7170 &rdp_context->link_stat, vport);
7171 len += lpfc_rdp_res_oed_temp_desc(phba,
7172 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7173 rdp_context->page_a2);
7174 len += lpfc_rdp_res_oed_voltage_desc(phba,
7175 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7176 rdp_context->page_a2);
7177 len += lpfc_rdp_res_oed_txbias_desc(phba,
7178 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7179 rdp_context->page_a2);
7180 len += lpfc_rdp_res_oed_txpower_desc(phba,
7181 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7182 rdp_context->page_a2);
7183 len += lpfc_rdp_res_oed_rxpower_desc(phba,
7184 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7185 rdp_context->page_a2);
7186 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
7187 rdp_context->page_a0, vport);
7188
7189 rdp_res->length = cpu_to_be32(len - 8);
7190 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7191
7192 /* Now that we know the true size of the payload, update the BPL */
7193 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
7194 bpl->tus.f.bdeSize = len;
7195 bpl->tus.f.bdeFlags = 0;
7196 bpl->tus.w = le32_to_cpu(bpl->tus.w);
7197
7198 phba->fc_stat.elsXmitACC++;
7199 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7200 if (!elsiocb->ndlp) {
7201 lpfc_els_free_iocb(phba, elsiocb);
7202 goto free_rdp_context;
7203 }
7204
7205 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7206 if (rc == IOCB_ERROR) {
7207 lpfc_els_free_iocb(phba, elsiocb);
7208 lpfc_nlp_put(ndlp);
7209 }
7210
7211 goto free_rdp_context;
7212
7213 error:
7214 cmdsize = 2 * sizeof(uint32_t);
7215 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
7216 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
7217 if (!elsiocb)
7218 goto free_rdp_context;
7219
7220 if (phba->sli_rev == LPFC_SLI_REV4) {
7221 wqe = &elsiocb->wqe;
7222 /* ox-id of the frame */
7223 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7224 rdp_context->ox_id);
7225 bf_set(wqe_ctxt_tag,
7226 &wqe->xmit_els_rsp.wqe_com,
7227 rdp_context->rx_id);
7228 } else {
7229 icmd = &elsiocb->iocb;
7230 icmd->ulpContext = rdp_context->rx_id;
7231 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
7232 }
7233
7234 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7235
7236 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
7237 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7238 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7239
7240 phba->fc_stat.elsXmitLSRJT++;
7241 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7242 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7243 if (!elsiocb->ndlp) {
7244 lpfc_els_free_iocb(phba, elsiocb);
7245 goto free_rdp_context;
7246 }
7247
7248 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7249 if (rc == IOCB_ERROR) {
7250 lpfc_els_free_iocb(phba, elsiocb);
7251 lpfc_nlp_put(ndlp);
7252 }
7253
7254 free_rdp_context:
7255 /* This reference put is for the original unsolicited RDP. If the
7256 * prep failed, there is no reference to remove.
7257 */
7258 lpfc_nlp_put(ndlp);
7259 kfree(rdp_context);
7260 }
7261
7262 static int
7263 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
7264 {
7265 LPFC_MBOXQ_t *mbox = NULL;
7266 int rc;
7267
7268 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7269 if (!mbox) {
7270 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
7271 "7105 failed to allocate mailbox memory");
7272 return 1;
7273 }
7274
7275 if (lpfc_sli4_dump_page_a0(phba, mbox))
7276 goto rdp_fail;
7277 mbox->vport = rdp_context->ndlp->vport;
7278 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
7279 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
7280 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7281 if (rc == MBX_NOT_FINISHED) {
7282 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
7283 return 1;
7284 }
7285
7286 return 0;
7287
7288 rdp_fail:
7289 mempool_free(mbox, phba->mbox_mem_pool);
7290 return 1;
7291 }
7292
7293 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
7294 struct lpfc_rdp_context *rdp_context)
7295 {
7296 LPFC_MBOXQ_t *mbox = NULL;
7297 int rc;
7298 struct lpfc_dmabuf *mp;
7299 struct lpfc_dmabuf *mpsave;
7300 void *virt;
7301 MAILBOX_t *mb;
7302
7303 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7304 if (!mbox) {
7305 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
7306 "7205 failed to allocate mailbox memory");
7307 return 1;
7308 }
7309
7310 if (lpfc_sli4_dump_page_a0(phba, mbox))
7311 goto sfp_fail;
7312 mp = mbox->ctx_buf;
7313 mpsave = mp;
7314 virt = mp->virt;
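/* Two DUMP_MEMORY mailbox commands are issued synchronously, back to
 * back: the first retrieves SFF page A0 (identification data), then the
 * same mailbox and DMA buffer are reused for page A2 (diagnostic data).
 */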
7315 if (phba->sli_rev < LPFC_SLI_REV4) {
7316 mb = &mbox->u.mb;
7317 mb->un.varDmp.cv = 1;
7318 mb->un.varDmp.co = 1;
7319 mb->un.varWords[2] = 0;
7320 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4;
7321 mb->un.varWords[4] = 0;
7322 mb->un.varWords[5] = 0;
7323 mb->un.varWords[6] = 0;
7324 mb->un.varWords[7] = 0;
7325 mb->un.varWords[8] = 0;
7326 mb->un.varWords[9] = 0;
7327 mb->un.varWords[10] = 0;
7328 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
7329 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
7330 mbox->mbox_offset_word = 5;
7331 mbox->ctx_buf = virt;
7332 } else {
7333 bf_set(lpfc_mbx_memory_dump_type3_length,
7334 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
7335 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
7336 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
7337 }
7338 mbox->vport = phba->pport;
7339 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
7340
7341 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
7342 if (rc == MBX_NOT_FINISHED) {
7343 rc = 1;
7344 goto error;
7345 }
7346
7347 if (phba->sli_rev == LPFC_SLI_REV4)
7348 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
7349 else
7350 mp = mpsave;
7351
7352 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
7353 rc = 1;
7354 goto error;
7355 }
7356
7357 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
7358 DMP_SFF_PAGE_A0_SIZE);
7359
7360 memset(mbox, 0, sizeof(*mbox));
7361 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
7362 INIT_LIST_HEAD(&mp->list);
7363
7364 /* save address for completion */
7365 mbox->ctx_buf = mp;
7366 mbox->vport = phba->pport;
7367
7368 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
7369 bf_set(lpfc_mbx_memory_dump_type3_type,
7370 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
7371 bf_set(lpfc_mbx_memory_dump_type3_link,
7372 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
7373 bf_set(lpfc_mbx_memory_dump_type3_page_no,
7374 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
7375 if (phba->sli_rev < LPFC_SLI_REV4) {
7376 mb = &mbox->u.mb;
7377 mb->un.varDmp.cv = 1;
7378 mb->un.varDmp.co = 1;
7379 mb->un.varWords[2] = 0;
7380 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4;
7381 mb->un.varWords[4] = 0;
7382 mb->un.varWords[5] = 0;
7383 mb->un.varWords[6] = 0;
7384 mb->un.varWords[7] = 0;
7385 mb->un.varWords[8] = 0;
7386 mb->un.varWords[9] = 0;
7387 mb->un.varWords[10] = 0;
7388 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
7389 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
7390 mbox->mbox_offset_word = 5;
7391 mbox->ctx_buf = virt;
7392 } else {
7393 bf_set(lpfc_mbx_memory_dump_type3_length,
7394 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
7395 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
7396 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
7397 }
7398
7399 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
7400 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
7401 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
7402 rc = 1;
7403 goto error;
7404 }
7405 rc = 0;
7406
7407 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
7408 DMP_SFF_PAGE_A2_SIZE);
7409
7410 error:
7411 mbox->ctx_buf = mpsave;
7412 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
7413
7414 return rc;
7415
7416 sfp_fail:
7417 mempool_free(mbox, phba->mbox_mem_pool);
7418 return 1;
7419 }
7420
7421 /*
7422 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
7423 * @vport: pointer to a host virtual N_Port data structure.
7424 * @cmdiocb: pointer to lpfc command iocb data structure.
7425 * @ndlp: pointer to a node-list data structure.
7426 *
7427 * This routine processes an unsolicited RDP(Read Diagnostic Parameters)
7428 * IOCB. First, the payload of the unsolicited RDP is checked.
7429 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
7430 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
7431  * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
7432  * lpfc_els_rdp_cmpl() to gather all the data and send the RDP response.
7433 *
7434 * Return code
7435 * 0 - Sent the acc response
7436 * 1 - Sent the reject response.
7437 */
7438 static int
7439 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7440 struct lpfc_nodelist *ndlp)
7441 {
7442 struct lpfc_hba *phba = vport->phba;
7443 struct lpfc_dmabuf *pcmd;
7444 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
7445 struct fc_rdp_req_frame *rdp_req;
7446 struct lpfc_rdp_context *rdp_context;
7447 union lpfc_wqe128 *cmd = NULL;
7448 struct ls_rjt stat;
7449
7450 if (phba->sli_rev < LPFC_SLI_REV4 ||
7451 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7452 LPFC_SLI_INTF_IF_TYPE_2) {
7453 rjt_err = LSRJT_UNABLE_TPC;
7454 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7455 goto error;
7456 }
7457
7458 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
7459 rjt_err = LSRJT_UNABLE_TPC;
7460 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7461 goto error;
7462 }
7463
7464 pcmd = cmdiocb->cmd_dmabuf;
7465 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
7466
7467 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7468 "2422 ELS RDP Request "
7469 "dec len %d tag x%x port_id %d len %d\n",
7470 be32_to_cpu(rdp_req->rdp_des_length),
7471 be32_to_cpu(rdp_req->nport_id_desc.tag),
7472 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
7473 be32_to_cpu(rdp_req->nport_id_desc.length));
7474
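/* The request must carry exactly one N_Port ID descriptor with the
 * expected tag and length; anything else is rejected as a logical error.
 */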
7475 if (sizeof(struct fc_rdp_nport_desc) !=
7476 be32_to_cpu(rdp_req->rdp_des_length))
7477 goto rjt_logerr;
7478 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
7479 goto rjt_logerr;
7480 if (RDP_NPORT_ID_SIZE !=
7481 be32_to_cpu(rdp_req->nport_id_desc.length))
7482 goto rjt_logerr;
7483 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
7484 if (!rdp_context) {
7485 rjt_err = LSRJT_UNABLE_TPC;
7486 goto error;
7487 }
7488
7489 cmd = &cmdiocb->wqe;
7490 rdp_context->ndlp = lpfc_nlp_get(ndlp);
7491 if (!rdp_context->ndlp) {
7492 kfree(rdp_context);
7493 rjt_err = LSRJT_UNABLE_TPC;
7494 goto error;
7495 }
7496 rdp_context->ox_id = bf_get(wqe_rcvoxid,
7497 &cmd->xmit_els_rsp.wqe_com);
7498 rdp_context->rx_id = bf_get(wqe_ctxt_tag,
7499 &cmd->xmit_els_rsp.wqe_com);
7500 rdp_context->cmpl = lpfc_els_rdp_cmpl;
7501 if (lpfc_get_rdp_info(phba, rdp_context)) {
7502 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
7503 "2423 Unable to send mailbox");
7504 kfree(rdp_context);
7505 rjt_err = LSRJT_UNABLE_TPC;
7506 lpfc_nlp_put(ndlp);
7507 goto error;
7508 }
7509
7510 return 0;
7511
7512 rjt_logerr:
7513 rjt_err = LSRJT_LOGICAL_ERR;
7514
7515 error:
7516 memset(&stat, 0, sizeof(stat));
7517 stat.un.b.lsRjtRsnCode = rjt_err;
7518 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
7519 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7520 return 1;
7521 }
7522
7523
7524 static void
7525 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7526 {
7527 MAILBOX_t *mb;
7528 IOCB_t *icmd;
7529 union lpfc_wqe128 *wqe;
7530 uint8_t *pcmd;
7531 struct lpfc_iocbq *elsiocb;
7532 struct lpfc_nodelist *ndlp;
7533 struct ls_rjt *stat;
7534 union lpfc_sli4_cfg_shdr *shdr;
7535 struct lpfc_lcb_context *lcb_context;
7536 struct fc_lcb_res_frame *lcb_res;
7537 uint32_t cmdsize, shdr_status, shdr_add_status;
7538 int rc;
7539
7540 mb = &pmb->u.mb;
7541 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
7542 ndlp = lcb_context->ndlp;
7543 pmb->ctx_ndlp = NULL;
7544 pmb->ctx_buf = NULL;
7545
7546 shdr = (union lpfc_sli4_cfg_shdr *)
7547 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
7548 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7549 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7550
7551 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
7552 "0194 SET_BEACON_CONFIG mailbox "
7553 "completed with status x%x add_status x%x,"
7554 " mbx status x%x\n",
7555 shdr_status, shdr_add_status, mb->mbxStatus);
7556
7557 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
7558 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
7559 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
7560 mempool_free(pmb, phba->mbox_mem_pool);
7561 goto error;
7562 }
7563
7564 mempool_free(pmb, phba->mbox_mem_pool);
7565 cmdsize = sizeof(struct fc_lcb_res_frame);
7566 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7567 lpfc_max_els_tries, ndlp,
7568 ndlp->nlp_DID, ELS_CMD_ACC);
7569
7570 /* Decrement the ndlp reference count from previous mbox command */
7571 lpfc_nlp_put(ndlp);
7572
7573 if (!elsiocb)
7574 goto free_lcb_context;
7575
7576 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
7577
7578 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
7579
7580 if (phba->sli_rev == LPFC_SLI_REV4) {
7581 wqe = &elsiocb->wqe;
7582 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7583 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7584 lcb_context->ox_id);
7585 } else {
7586 icmd = &elsiocb->iocb;
7587 icmd->ulpContext = lcb_context->rx_id;
7588 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7589 }
7590
7591 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7592 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
7593 lcb_res->lcb_sub_command = lcb_context->sub_command;
7594 lcb_res->lcb_type = lcb_context->type;
7595 lcb_res->capability = lcb_context->capability;
7596 lcb_res->lcb_frequency = lcb_context->frequency;
7597 lcb_res->lcb_duration = lcb_context->duration;
7598 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7599 phba->fc_stat.elsXmitACC++;
7600
7601 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7602 if (!elsiocb->ndlp) {
7603 lpfc_els_free_iocb(phba, elsiocb);
7604 goto out;
7605 }
7606
7607 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7608 if (rc == IOCB_ERROR) {
7609 lpfc_els_free_iocb(phba, elsiocb);
7610 lpfc_nlp_put(ndlp);
7611 }
7612 out:
7613 kfree(lcb_context);
7614 return;
7615
7616 error:
7617 cmdsize = sizeof(struct fc_lcb_res_frame);
7618 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7619 lpfc_max_els_tries, ndlp,
7620 ndlp->nlp_DID, ELS_CMD_LS_RJT);
7621 lpfc_nlp_put(ndlp);
7622 if (!elsiocb)
7623 goto free_lcb_context;
7624
7625 if (phba->sli_rev == LPFC_SLI_REV4) {
7626 wqe = &elsiocb->wqe;
7627 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7628 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7629 lcb_context->ox_id);
7630 } else {
7631 icmd = &elsiocb->iocb;
7632 icmd->ulpContext = lcb_context->rx_id;
7633 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7634 }
7635
7636 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7637
7638 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
7639 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7640 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7641
7642 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
7643 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
7644
7645 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7646 phba->fc_stat.elsXmitLSRJT++;
7647 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7648 if (!elsiocb->ndlp) {
7649 lpfc_els_free_iocb(phba, elsiocb);
7650 goto free_lcb_context;
7651 }
7652
7653 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7654 if (rc == IOCB_ERROR) {
7655 lpfc_els_free_iocb(phba, elsiocb);
7656 lpfc_nlp_put(ndlp);
7657 }
7658 free_lcb_context:
7659 kfree(lcb_context);
7660 }
7661
7662 static int
7663 lpfc_sli4_set_beacon(struct lpfc_vport *vport,
7664 struct lpfc_lcb_context *lcb_context,
7665 uint32_t beacon_state)
7666 {
7667 struct lpfc_hba *phba = vport->phba;
7668 union lpfc_sli4_cfg_shdr *cfg_shdr;
7669 LPFC_MBOXQ_t *mbox = NULL;
7670 uint32_t len;
7671 int rc;
7672
7673 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7674 if (!mbox)
7675 return 1;
7676
7677 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
7678 len = sizeof(struct lpfc_mbx_set_beacon_config) -
7679 sizeof(struct lpfc_sli4_cfg_mhdr);
7680 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7681 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
7682 LPFC_SLI4_MBX_EMBED);
7683 mbox->ctx_ndlp = (void *)lcb_context;
7684 mbox->vport = phba->pport;
7685 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
7686 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
7687 phba->sli4_hba.physical_port);
7688 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
7689 beacon_state);
7690 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
7691
7692 /*
7693 * Check bv1s bit before issuing the mailbox
7694 * if bv1s == 1, LCB V1 supported
7695 * else, LCB V0 supported
7696 */
7697
7698 if (phba->sli4_hba.pc_sli4_params.bv1s) {
7699 /* COMMON_SET_BEACON_CONFIG_V1 */
7700 cfg_shdr->request.word9 = BEACON_VERSION_V1;
7701 lcb_context->capability |= LCB_CAPABILITY_DURATION;
7702 bf_set(lpfc_mbx_set_beacon_port_type,
7703 &mbox->u.mqe.un.beacon_config, 0);
7704 bf_set(lpfc_mbx_set_beacon_duration_v1,
7705 &mbox->u.mqe.un.beacon_config,
7706 be16_to_cpu(lcb_context->duration));
7707 } else {
7708 /* COMMON_SET_BEACON_CONFIG_V0 */
7709 if (be16_to_cpu(lcb_context->duration) != 0) {
7710 mempool_free(mbox, phba->mbox_mem_pool);
7711 return 1;
7712 }
7713 cfg_shdr->request.word9 = BEACON_VERSION_V0;
7714 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
7715 bf_set(lpfc_mbx_set_beacon_state,
7716 &mbox->u.mqe.un.beacon_config, beacon_state);
7717 bf_set(lpfc_mbx_set_beacon_port_type,
7718 &mbox->u.mqe.un.beacon_config, 1);
7719 bf_set(lpfc_mbx_set_beacon_duration,
7720 &mbox->u.mqe.un.beacon_config,
7721 be16_to_cpu(lcb_context->duration));
7722 }
7723
7724 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7725 if (rc == MBX_NOT_FINISHED) {
7726 mempool_free(mbox, phba->mbox_mem_pool);
7727 return 1;
7728 }
7729
7730 return 0;
7731 }
7732
7733
7734 /**
7735 * lpfc_els_rcv_lcb - Process an unsolicited LCB
7736 * @vport: pointer to a host virtual N_Port data structure.
7737 * @cmdiocb: pointer to lpfc command iocb data structure.
7738 * @ndlp: pointer to a node-list data structure.
7739 *
7740 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB.
7741 * First, the payload of the unsolicited LCB is checked.
7742  * Then, based on the subcommand, the beacon is turned either on or off.
7743 *
7744 * Return code
7745 * 0 - Sent the acc response
7746 * 1 - Sent the reject response.
7747 **/
7748 static int
7749 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7750 struct lpfc_nodelist *ndlp)
7751 {
7752 struct lpfc_hba *phba = vport->phba;
7753 struct lpfc_dmabuf *pcmd;
7754 uint8_t *lp;
7755 struct fc_lcb_request_frame *beacon;
7756 struct lpfc_lcb_context *lcb_context;
7757 u8 state, rjt_err = 0;
7758 struct ls_rjt stat;
7759
7760 pcmd = cmdiocb->cmd_dmabuf;
7761 lp = (uint8_t *)pcmd->virt;
7762 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
7763
7764 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7765 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
7766 "type x%x frequency %x duration x%x\n",
7767 lp[0], lp[1], lp[2],
7768 beacon->lcb_command,
7769 beacon->lcb_sub_command,
7770 beacon->lcb_type,
7771 beacon->lcb_frequency,
7772 be16_to_cpu(beacon->lcb_duration));
7773
7774 if (beacon->lcb_sub_command != LPFC_LCB_ON &&
7775 beacon->lcb_sub_command != LPFC_LCB_OFF) {
7776 rjt_err = LSRJT_CMD_UNSUPPORTED;
7777 goto rjt;
7778 }
7779
7780 if (phba->sli_rev < LPFC_SLI_REV4 ||
7781 phba->hba_flag & HBA_FCOE_MODE ||
7782 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7783 LPFC_SLI_INTF_IF_TYPE_2)) {
7784 rjt_err = LSRJT_CMD_UNSUPPORTED;
7785 goto rjt;
7786 }
7787
7788 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
7789 if (!lcb_context) {
7790 rjt_err = LSRJT_UNABLE_TPC;
7791 goto rjt;
7792 }
7793
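/* Save the request parameters in the lcb_context; the SET_BEACON_CONFIG
 * mailbox completion (lpfc_els_lcb_rsp) uses them to build the LCB ACC
 * or reject.
 */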
7794 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
7795 lcb_context->sub_command = beacon->lcb_sub_command;
7796 lcb_context->capability = 0;
7797 lcb_context->type = beacon->lcb_type;
7798 lcb_context->frequency = beacon->lcb_frequency;
7799 lcb_context->duration = beacon->lcb_duration;
7800 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
7801 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
7802 lcb_context->ndlp = lpfc_nlp_get(ndlp);
7803 if (!lcb_context->ndlp) {
7804 rjt_err = LSRJT_UNABLE_TPC;
7805 goto rjt_free;
7806 }
7807
7808 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
7809 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
7810 "0193 failed to send mail box");
7811 lpfc_nlp_put(ndlp);
7812 rjt_err = LSRJT_UNABLE_TPC;
7813 goto rjt_free;
7814 }
7815 return 0;
7816
7817 rjt_free:
7818 kfree(lcb_context);
7819 rjt:
7820 memset(&stat, 0, sizeof(stat));
7821 stat.un.b.lsRjtRsnCode = rjt_err;
7822 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7823 return 1;
7824 }
7825
7826
7827 /**
7828 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
7829 * @vport: pointer to a host virtual N_Port data structure.
7830 *
7831 * This routine cleans up any Registration State Change Notification
7832 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
7833  * @vport, together with the host_lock, is used to prevent multiple threads
7834  * from accessing the RSCN array of the same @vport at the same time.
7835 **/
7836 void
7837 lpfc_els_flush_rscn(struct lpfc_vport *vport)
7838 {
7839 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7840 struct lpfc_hba *phba = vport->phba;
7841 int i;
7842
7843 spin_lock_irq(shost->host_lock);
7844 if (vport->fc_rscn_flush) {
7845 /* Another thread is walking fc_rscn_id_list on this vport */
7846 spin_unlock_irq(shost->host_lock);
7847 return;
7848 }
7849 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
7850 vport->fc_rscn_flush = 1;
7851 spin_unlock_irq(shost->host_lock);
7852
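/* Free the saved RSCN buffers outside the host lock, then clear the
 * count and RSCN mode flags under it.
 */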
7853 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7854 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
7855 vport->fc_rscn_id_list[i] = NULL;
7856 }
7857 spin_lock_irq(shost->host_lock);
7858 vport->fc_rscn_id_cnt = 0;
7859 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
7860 spin_unlock_irq(shost->host_lock);
7861 lpfc_can_disctmo(vport);
7862 /* Indicate we are done walking this fc_rscn_id_list */
7863 vport->fc_rscn_flush = 0;
7864 }
7865
7866 /**
7867 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
7868 * @vport: pointer to a host virtual N_Port data structure.
7869 * @did: remote destination port identifier.
7870 *
7871 * This routine checks whether there is any pending Registration State
7872  * Change Notification (RSCN) pending for a @did on the @vport.
7873 *
7874 * Return code
7875  * Non-zero - The @did matched a pending RSCN
7876  * 0 - The @did did not match any pending RSCN
7877 **/
7878 int
7879 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
7880 {
7881 D_ID ns_did;
7882 D_ID rscn_did;
7883 uint32_t *lp;
7884 uint32_t payload_len, i;
7885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7886
7887 ns_did.un.word = did;
7888
7889 /* Never match fabric nodes for RSCNs */
7890 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7891 return 0;
7892
7893 /* If we are doing a FULL RSCN rediscovery, match everything */
7894 if (vport->fc_flag & FC_RSCN_DISCOVERY)
7895 return did;
7896
7897 spin_lock_irq(shost->host_lock);
7898 if (vport->fc_rscn_flush) {
7899 /* Another thread is walking fc_rscn_id_list on this vport */
7900 spin_unlock_irq(shost->host_lock);
7901 return 0;
7902 }
7903 /* Indicate we are walking fc_rscn_id_list on this vport */
7904 vport->fc_rscn_flush = 1;
7905 spin_unlock_irq(shost->host_lock);
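/* Each entry of a saved RSCN payload is a 4-byte affected-address
 * descriptor; its address format selects how much of the D_ID
 * (domain, area, port) must match.
 */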
7906 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7907 lp = vport->fc_rscn_id_list[i]->virt;
7908 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
7909 payload_len -= sizeof(uint32_t); /* take off word 0 */
7910 while (payload_len) {
7911 rscn_did.un.word = be32_to_cpu(*lp++);
7912 payload_len -= sizeof(uint32_t);
7913 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
7914 case RSCN_ADDRESS_FORMAT_PORT:
7915 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7916 && (ns_did.un.b.area == rscn_did.un.b.area)
7917 && (ns_did.un.b.id == rscn_did.un.b.id))
7918 goto return_did_out;
7919 break;
7920 case RSCN_ADDRESS_FORMAT_AREA:
7921 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7922 && (ns_did.un.b.area == rscn_did.un.b.area))
7923 goto return_did_out;
7924 break;
7925 case RSCN_ADDRESS_FORMAT_DOMAIN:
7926 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7927 goto return_did_out;
7928 break;
7929 case RSCN_ADDRESS_FORMAT_FABRIC:
7930 goto return_did_out;
7931 }
7932 }
7933 }
7934 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7935 vport->fc_rscn_flush = 0;
7936 return 0;
7937 return_did_out:
7938 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7939 vport->fc_rscn_flush = 0;
7940 return did;
7941 }
7942
7943 /**
7944 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
7945 * @vport: pointer to a host virtual N_Port data structure.
7946 *
7947 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
7948  * state machine for a @vport's nodes that have a pending RSCN (Registration
7949 * State Change Notification).
7950 *
7951 * Return code
7952  * 0 - Successful (currently always returns 0)
7953 **/
7954 static int
7955 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
7956 {
7957 struct lpfc_nodelist *ndlp = NULL, *n;
7958
7959 /* Move all affected nodes by pending RSCNs to NPR state. */
7960 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
7961 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
7962 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
7963 continue;
7964
7965 /* NVME Target mode does not do RSCN Recovery. */
7966 if (vport->phba->nvmet_support)
7967 continue;
7968
7969 /* If we are in the process of doing discovery on this
7970 * NPort, let it continue on its own.
7971 */
7972 switch (ndlp->nlp_state) {
7973 case NLP_STE_PLOGI_ISSUE:
7974 case NLP_STE_ADISC_ISSUE:
7975 case NLP_STE_REG_LOGIN_ISSUE:
7976 case NLP_STE_PRLI_ISSUE:
7977 case NLP_STE_LOGO_ISSUE:
7978 continue;
7979 }
7980
7981 lpfc_disc_state_machine(vport, ndlp, NULL,
7982 NLP_EVT_DEVICE_RECOVERY);
7983 lpfc_cancel_retry_delay_tmo(vport, ndlp);
7984 }
7985 return 0;
7986 }
7987
7988 /**
7989 * lpfc_send_rscn_event - Send an RSCN event to management application
7990 * @vport: pointer to a host virtual N_Port data structure.
7991 * @cmdiocb: pointer to lpfc command iocb data structure.
7992 *
7993 * lpfc_send_rscn_event sends an RSCN netlink event to management
7994 * applications.
7995 */
7996 static void
7997 lpfc_send_rscn_event(struct lpfc_vport *vport,
7998 struct lpfc_iocbq *cmdiocb)
7999 {
8000 struct lpfc_dmabuf *pcmd;
8001 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8002 uint32_t *payload_ptr;
8003 uint32_t payload_len;
8004 struct lpfc_rscn_event_header *rscn_event_data;
8005
8006 pcmd = cmdiocb->cmd_dmabuf;
8007 payload_ptr = (uint32_t *) pcmd->virt;
8008 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
8009
8010 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
8011 payload_len, GFP_KERNEL);
8012 if (!rscn_event_data) {
8013 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8014 "0147 Failed to allocate memory for RSCN event\n");
8015 return;
8016 }
8017 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
8018 rscn_event_data->payload_length = payload_len;
8019 memcpy(rscn_event_data->rscn_payload, payload_ptr,
8020 payload_len);
8021
8022 fc_host_post_vendor_event(shost,
8023 fc_get_event_number(),
8024 sizeof(struct lpfc_rscn_event_header) + payload_len,
8025 (char *)rscn_event_data,
8026 LPFC_NL_VENDOR_ID);
8027
8028 kfree(rscn_event_data);
8029 }
8030
8031 /**
8032 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
8033 * @vport: pointer to a host virtual N_Port data structure.
8034 * @cmdiocb: pointer to lpfc command iocb data structure.
8035 * @ndlp: pointer to a node-list data structure.
8036 *
8037 * This routine processes an unsolicited RSCN (Registration State Change
8038  * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
8039  * and fc_host_post_event() is invoked to notify the FC transport layer. If
8040  * the discovery state machine is about to begin discovery, the RSCN is
8041  * simply accepted and the discovery process will satisfy it. If this RSCN
8042  * only contains N_Port IDs for other vports on this HBA, the RSCN is
8043  * accepted and otherwise ignored. If the state machine is in the recovery
8044  * state, the fc_rscn_id_list of this @vport is walked and the
8045  * lpfc_rscn_recovery_check() routine is invoked to send a recovery event to
8046  * all nodes that match the RSCN payload. Otherwise, the lpfc_els_handle_rscn()
8047 * routine is invoked to handle the RSCN event.
8048 *
8049 * Return code
8050 * 0 - Just sent the acc response
8051 * 1 - Sent the acc response and waited for name server completion
8052 **/
8053 static int
8054 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8055 struct lpfc_nodelist *ndlp)
8056 {
8057 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8058 struct lpfc_hba *phba = vport->phba;
8059 struct lpfc_dmabuf *pcmd;
8060 uint32_t *lp, *datap;
8061 uint32_t payload_len, length, nportid, *cmd;
8062 int rscn_cnt;
8063 int rscn_id = 0, hba_id = 0;
8064 int i, tmo;
8065
8066 pcmd = cmdiocb->cmd_dmabuf;
8067 lp = (uint32_t *) pcmd->virt;
8068
8069 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
8070 payload_len -= sizeof(uint32_t); /* take off word 0 */
8071 /* RSCN received */
8072 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8073 "0214 RSCN received Data: x%x x%x x%x x%x\n",
8074 vport->fc_flag, payload_len, *lp,
8075 vport->fc_rscn_id_cnt);
8076
8077 /* Send an RSCN event to the management application */
8078 lpfc_send_rscn_event(vport, cmdiocb);
8079
8080 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
8081 fc_host_post_event(shost, fc_get_event_number(),
8082 FCH_EVT_RSCN, lp[i]);
8083
8084 /* Check if RSCN is coming from a direct-connected remote NPort */
8085 if (vport->fc_flag & FC_PT2PT) {
8086 /* If so, just ACC it, no other action needed for now */
8087 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8088 "2024 pt2pt RSCN %08x Data: x%x x%x\n",
8089 *lp, vport->fc_flag, payload_len);
8090 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8091
8092 /* Check to see if we need to NVME rescan this target
8093 * remoteport.
8094 */
8095 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
8096 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
8097 lpfc_nvme_rescan_port(vport, ndlp);
8098 return 0;
8099 }
8100
8101 /* If we are about to begin discovery, just ACC the RSCN.
8102 * Discovery processing will satisfy it.
8103 */
8104 if (vport->port_state <= LPFC_NS_QRY) {
8105 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8106 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
8107 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8108
8109 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8110 return 0;
8111 }
8112
8113 /* If this RSCN just contains NPortIDs for other vports on this HBA,
8114 * just ACC and ignore it.
8115 */
8116 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
8117 !(vport->cfg_peer_port_login)) {
8118 i = payload_len;
8119 datap = lp;
8120 while (i > 0) {
8121 nportid = *datap++;
8122 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
8123 i -= sizeof(uint32_t);
8124 rscn_id++;
8125 if (lpfc_find_vport_by_did(phba, nportid))
8126 hba_id++;
8127 }
8128 if (rscn_id == hba_id) {
8129 /* ALL NPortIDs in RSCN are on HBA */
8130 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8131 "0219 Ignore RSCN "
8132 "Data: x%x x%x x%x x%x\n",
8133 vport->fc_flag, payload_len,
8134 *lp, vport->fc_rscn_id_cnt);
8135 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8136 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
8137 ndlp->nlp_DID, vport->port_state,
8138 ndlp->nlp_flag);
8139
8140 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
8141 ndlp, NULL);
8142 /* Restart disctmo if its already running */
8143 if (vport->fc_flag & FC_DISC_TMO) {
8144 tmo = ((phba->fc_ratov * 3) + 3);
8145 mod_timer(&vport->fc_disctmo,
8146 jiffies +
8147 msecs_to_jiffies(1000 * tmo));
8148 }
8149 return 0;
8150 }
8151 }
8152
8153 spin_lock_irq(shost->host_lock);
8154 if (vport->fc_rscn_flush) {
8155 /* Another thread is walking fc_rscn_id_list on this vport */
8156 vport->fc_flag |= FC_RSCN_DISCOVERY;
8157 spin_unlock_irq(shost->host_lock);
8158 /* Send back ACC */
8159 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8160 return 0;
8161 }
8162 /* Indicate we are walking fc_rscn_id_list on this vport */
8163 vport->fc_rscn_flush = 1;
8164 spin_unlock_irq(shost->host_lock);
8165 	/* Get the array count after successfully acquiring the token */
8166 rscn_cnt = vport->fc_rscn_id_cnt;
8167 /* If we are already processing an RSCN, save the received
8168 	 * RSCN payload buffer (cmdiocb->cmd_dmabuf) to process later.
8169 */
8170 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
8171 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8172 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
8173 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8174
8175 spin_lock_irq(shost->host_lock);
8176 vport->fc_flag |= FC_RSCN_DEFERRED;
8177
8178 		/* Restart disctmo if it's already running */
8179 if (vport->fc_flag & FC_DISC_TMO) {
8180 tmo = ((phba->fc_ratov * 3) + 3);
8181 mod_timer(&vport->fc_disctmo,
8182 jiffies + msecs_to_jiffies(1000 * tmo));
8183 }
8184 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
8185 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
8186 vport->fc_flag |= FC_RSCN_MODE;
8187 spin_unlock_irq(shost->host_lock);
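			/* Coalesce this RSCN payload into the last saved
			 * buffer if the combined length still fits in one
			 * buffer (LPFC_BPL_SIZE); otherwise store it as a
			 * new fc_rscn_id_list entry.
			 */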
8188 if (rscn_cnt) {
8189 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
8190 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
8191 }
8192 if ((rscn_cnt) &&
8193 (payload_len + length <= LPFC_BPL_SIZE)) {
8194 *cmd &= ELS_CMD_MASK;
8195 *cmd |= cpu_to_be32(payload_len + length);
8196 memcpy(((uint8_t *)cmd) + length, lp,
8197 payload_len);
8198 } else {
8199 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
8200 vport->fc_rscn_id_cnt++;
8201 				/* If we zero cmdiocb->cmd_dmabuf, the calling
8202 * routine will not try to free it.
8203 */
8204 cmdiocb->cmd_dmabuf = NULL;
8205 }
8206 /* Deferred RSCN */
8207 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8208 "0235 Deferred RSCN "
8209 "Data: x%x x%x x%x\n",
8210 vport->fc_rscn_id_cnt, vport->fc_flag,
8211 vport->port_state);
8212 } else {
8213 vport->fc_flag |= FC_RSCN_DISCOVERY;
8214 spin_unlock_irq(shost->host_lock);
8215 /* ReDiscovery RSCN */
8216 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8217 "0234 ReDiscovery RSCN "
8218 "Data: x%x x%x x%x\n",
8219 vport->fc_rscn_id_cnt, vport->fc_flag,
8220 vport->port_state);
8221 }
8222 /* Indicate we are done walking fc_rscn_id_list on this vport */
8223 vport->fc_rscn_flush = 0;
8224 /* Send back ACC */
8225 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8226 /* send RECOVERY event for ALL nodes that match RSCN payload */
8227 lpfc_rscn_recovery_check(vport);
8228 return 0;
8229 }
8230 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8231 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
8232 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8233
8234 spin_lock_irq(shost->host_lock);
8235 vport->fc_flag |= FC_RSCN_MODE;
8236 spin_unlock_irq(shost->host_lock);
8237 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
8238 /* Indicate we are done walking fc_rscn_id_list on this vport */
8239 vport->fc_rscn_flush = 0;
8240 /*
8241 	 * If we zero cmdiocb->cmd_dmabuf, the calling routine will
8242 * not try to free it.
8243 */
8244 cmdiocb->cmd_dmabuf = NULL;
8245 lpfc_set_disctmo(vport);
8246 /* Send back ACC */
8247 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8248 /* send RECOVERY event for ALL nodes that match RSCN payload */
8249 lpfc_rscn_recovery_check(vport);
8250 return lpfc_els_handle_rscn(vport);
8251 }
8252
8253 /**
8254 * lpfc_els_handle_rscn - Handle rscn for a vport
8255 * @vport: pointer to a host virtual N_Port data structure.
8256 *
8257  * This routine handles the Registration State Change Notification
8258 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
8259 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
8260 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
8261 * NameServer shall be issued. If CT command to the NameServer fails to be
8262 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
8263 * RSCN activities with the @vport.
8264 *
8265 * Return code
8266 * 0 - Cleaned up rscn on the @vport
8267  *   1 - Wait for plogi to name server before proceeding
8268 **/
8269 int
8270 lpfc_els_handle_rscn(struct lpfc_vport *vport)
8271 {
8272 struct lpfc_nodelist *ndlp;
8273 struct lpfc_hba *phba = vport->phba;
8274
8275 /* Ignore RSCN if the port is being torn down. */
8276 if (vport->load_flag & FC_UNLOADING) {
8277 lpfc_els_flush_rscn(vport);
8278 return 0;
8279 }
8280
8281 /* Start timer for RSCN processing */
8282 lpfc_set_disctmo(vport);
8283
8284 /* RSCN processed */
8285 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8286 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
8287 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
8288 vport->port_state, vport->num_disc_nodes,
8289 vport->gidft_inp);
8290
8291 /* To process RSCN, first compare RSCN data with NameServer */
8292 vport->fc_ns_retry = 0;
8293 vport->num_disc_nodes = 0;
8294
8295 ndlp = lpfc_findnode_did(vport, NameServer_DID);
8296 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
8297 /* Good ndlp, issue CT Request to NameServer. Need to
8298 * know how many gidfts were issued. If none, then just
8299 * flush the RSCN. Otherwise, the outstanding requests
8300 * need to complete.
8301 */
8302 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
8303 if (lpfc_issue_gidft(vport) > 0)
8304 return 1;
8305 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
8306 if (lpfc_issue_gidpt(vport) > 0)
8307 return 1;
8308 } else {
8309 return 1;
8310 }
8311 } else {
8312 /* Nameserver login in question. Revalidate. */
8313 if (ndlp) {
8314 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
8315 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8316 } else {
8317 ndlp = lpfc_nlp_init(vport, NameServer_DID);
8318 if (!ndlp) {
8319 lpfc_els_flush_rscn(vport);
8320 return 0;
8321 }
8322 ndlp->nlp_prev_state = ndlp->nlp_state;
8323 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8324 }
8325 ndlp->nlp_type |= NLP_FABRIC;
8326 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
8327 /* Wait for NameServer login cmpl before we can
8328 * continue
8329 */
8330 return 1;
8331 }
8332
8333 lpfc_els_flush_rscn(vport);
8334 return 0;
8335 }
8336
8337 /**
8338 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
8339 * @vport: pointer to a host virtual N_Port data structure.
8340 * @cmdiocb: pointer to lpfc command iocb data structure.
8341 * @ndlp: pointer to a node-list data structure.
8342 *
8343 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
8344 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
8345 * point topology. As an unsolicited FLOGI should not be received in a loop
8346 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
8347 * lpfc_check_sparm() routine is invoked to check the parameters in the
8348 * unsolicited FLOGI. If parameters validation failed, the routine
8349 * lpfc_els_rsp_reject() shall be called with reject reason code set to
8350 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
8351 * FLOGI shall be compared with the Port WWN of the @vport to determine who
8352  * will initiate PLOGI. The party with the lexicographically higher value shall have
8353 * higher priority (as the winning port) and will initiate PLOGI and
8354 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
8355 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
8356 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
8357 *
8358 * Return code
8359 * 0 - Successfully processed the unsolicited flogi
8360 * 1 - Failed to process the unsolicited flogi
8361 **/
8362 static int
8363 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8364 struct lpfc_nodelist *ndlp)
8365 {
8366 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8367 struct lpfc_hba *phba = vport->phba;
8368 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
8369 uint32_t *lp = (uint32_t *) pcmd->virt;
8370 union lpfc_wqe128 *wqe = &cmdiocb->wqe;
8371 struct serv_parm *sp;
8372 LPFC_MBOXQ_t *mbox;
8373 uint32_t cmd, did;
8374 int rc;
8375 uint32_t fc_flag = 0;
8376 uint32_t port_state = 0;
8377
8378 /* Clear external loopback plug detected flag */
8379 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
8380
8381 cmd = *lp++;
8382 sp = (struct serv_parm *) lp;
8383
8384 /* FLOGI received */
8385
8386 lpfc_set_disctmo(vport);
8387
8388 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8389 /* We should never receive a FLOGI in loop mode, ignore it */
8390 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
8391
8392 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
8393 Loop Mode */
8394 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8395 "0113 An FLOGI ELS command x%x was "
8396 "received from DID x%x in Loop Mode\n",
8397 cmd, did);
8398 return 1;
8399 }
8400
8401 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
8402
8403 /*
8404 * If our portname is greater than the remote portname,
8405 * then we initiate Nport login.
8406 */
8407
8408 rc = memcmp(&vport->fc_portname, &sp->portName,
8409 sizeof(struct lpfc_name));
8410
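	/* rc == 0: the FLOGI carries our own portname, so this is our
	 * own frame looped back to us (external loopback plug).
	 */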
8411 if (!rc) {
8412 if (phba->sli_rev < LPFC_SLI_REV4) {
8413 mbox = mempool_alloc(phba->mbox_mem_pool,
8414 GFP_KERNEL);
8415 if (!mbox)
8416 return 1;
8417 lpfc_linkdown(phba);
8418 lpfc_init_link(phba, mbox,
8419 phba->cfg_topology,
8420 phba->cfg_link_speed);
8421 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
8422 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
8423 mbox->vport = vport;
8424 rc = lpfc_sli_issue_mbox(phba, mbox,
8425 MBX_NOWAIT);
8426 lpfc_set_loopback_flag(phba);
8427 if (rc == MBX_NOT_FINISHED)
8428 mempool_free(mbox, phba->mbox_mem_pool);
8429 return 1;
8430 }
8431
8432 /* External loopback plug insertion detected */
8433 phba->link_flag |= LS_EXTERNAL_LOOPBACK;
8434
8435 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
8436 "1119 External Loopback plug detected\n");
8437
8438 /* abort the flogi coming back to ourselves
8439 * due to external loopback on the port.
8440 */
8441 lpfc_els_abort_flogi(phba);
8442 return 0;
8443
8444 } else if (rc > 0) { /* greater than */
8445 spin_lock_irq(shost->host_lock);
8446 vport->fc_flag |= FC_PT2PT_PLOGI;
8447 spin_unlock_irq(shost->host_lock);
8448
8449 /* If we have the high WWPN we can assign our own
8450 * myDID; otherwise, we have to WAIT for a PLOGI
8451 * from the remote NPort to find out what it
8452 * will be.
8453 */
8454 vport->fc_myDID = PT2PT_LocalID;
8455 } else {
8456 vport->fc_myDID = PT2PT_RemoteID;
8457 }
8458
8459 /*
8460 * The vport state should go to LPFC_FLOGI only
8461 * AFTER we issue a FLOGI, not receive one.
8462 */
8463 spin_lock_irq(shost->host_lock);
8464 fc_flag = vport->fc_flag;
8465 port_state = vport->port_state;
8466 vport->fc_flag |= FC_PT2PT;
8467 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
8468
8469 /* Acking an unsol FLOGI. Count 1 for link bounce
8470 * work-around.
8471 */
8472 vport->rcv_flogi_cnt++;
8473 spin_unlock_irq(shost->host_lock);
8474 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8475 "3311 Rcv Flogi PS x%x new PS x%x "
8476 "fc_flag x%x new fc_flag x%x\n",
8477 port_state, vport->port_state,
8478 fc_flag, vport->fc_flag);
8479
8480 /*
8481 * We temporarily set fc_myDID to make it look like we are
8482 * a Fabric. This is done just so we end up with the right
8483 * did / sid on the FLOGI ACC rsp.
8484 */
8485 did = vport->fc_myDID;
8486 vport->fc_myDID = Fabric_DID;
8487
8488 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
8489
8490 /* Defer ACC response until AFTER we issue a FLOGI */
8491 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
8492 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
8493 &wqe->xmit_els_rsp.wqe_com);
8494 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
8495 &wqe->xmit_els_rsp.wqe_com);
8496
8497 vport->fc_myDID = did;
8498
8499 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8500 "3344 Deferring FLOGI ACC: rx_id: x%x,"
8501 " ox_id: x%x, hba_flag x%x\n",
8502 phba->defer_flogi_acc_rx_id,
8503 phba->defer_flogi_acc_ox_id, phba->hba_flag);
8504
8505 phba->defer_flogi_acc_flag = true;
8506
8507 return 0;
8508 }
8509
8510 /* Send back ACC */
8511 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
8512
8513 	/* Now let's put fc_myDID back to what it's supposed to be */
8514 vport->fc_myDID = did;
8515
8516 return 0;
8517 }
8518
8519 /**
8520 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
8521 * @vport: pointer to a host virtual N_Port data structure.
8522 * @cmdiocb: pointer to lpfc command iocb data structure.
8523 * @ndlp: pointer to a node-list data structure.
8524 *
8525 * This routine processes Request Node Identification Data (RNID) IOCB
8526 * received as an ELS unsolicited event. Only when the RNID specified format
8527 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
8528 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
8529 * Accept (ACC) the RNID ELS command. All the other RNID formats are
8530 * rejected by invoking the lpfc_els_rsp_reject() routine.
8531 *
8532 * Return code
8533 * 0 - Successfully processed rnid iocb (currently always return 0)
8534 **/
8535 static int
8536 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8537 struct lpfc_nodelist *ndlp)
8538 {
8539 struct lpfc_dmabuf *pcmd;
8540 uint32_t *lp;
8541 RNID *rn;
8542 struct ls_rjt stat;
8543
8544 pcmd = cmdiocb->cmd_dmabuf;
8545 lp = (uint32_t *) pcmd->virt;
8546
8547 lp++;
8548 rn = (RNID *) lp;
8549
8550 /* RNID received */
8551
8552 switch (rn->Format) {
8553 case 0:
8554 case RNID_TOPOLOGY_DISC:
8555 /* Send back ACC */
8556 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
8557 break;
8558 default:
8559 /* Reject this request because format not supported */
8560 stat.un.b.lsRjtRsvd0 = 0;
8561 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8562 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8563 stat.un.b.vendorUnique = 0;
8564 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
8565 NULL);
8566 }
8567 return 0;
8568 }
8569
8570 /**
8571 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
8572 * @vport: pointer to a host virtual N_Port data structure.
8573 * @cmdiocb: pointer to lpfc command iocb data structure.
8574 * @ndlp: pointer to a node-list data structure.
8575 *
8576 * Return code
8577 * 0 - Successfully processed echo iocb (currently always return 0)
8578 **/
8579 static int
8580 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8581 struct lpfc_nodelist *ndlp)
8582 {
8583 uint8_t *pcmd;
8584
8585 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
8586
8587 /* skip over first word of echo command to find echo data */
8588 pcmd += sizeof(uint32_t);
8589
8590 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
8591 return 0;
8592 }
8593
8594 /**
8595 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
8596 * @vport: pointer to a host virtual N_Port data structure.
8597 * @cmdiocb: pointer to lpfc command iocb data structure.
8598 * @ndlp: pointer to a node-list data structure.
8599 *
8600  * This routine processes a Link Incident Report Registration (LIRR) IOCB
8601 * received as an ELS unsolicited event. Currently, this function just invokes
8602 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
8603 *
8604 * Return code
8605 * 0 - Successfully processed lirr iocb (currently always return 0)
8606 **/
8607 static int
8608 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8609 struct lpfc_nodelist *ndlp)
8610 {
8611 struct ls_rjt stat;
8612
8613 /* For now, unconditionally reject this command */
8614 stat.un.b.lsRjtRsvd0 = 0;
8615 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8616 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8617 stat.un.b.vendorUnique = 0;
8618 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8619 return 0;
8620 }
8621
8622 /**
8623 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
8624 * @vport: pointer to a host virtual N_Port data structure.
8625 * @cmdiocb: pointer to lpfc command iocb data structure.
8626 * @ndlp: pointer to a node-list data structure.
8627 *
8628 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
8629 * received as an ELS unsolicited event. A request to RRQ shall only
8630 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
8631 * Nx_Port N_Port_ID of the target Exchange is the same as the
8632 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
8633 * not accepted, an LS_RJT with reason code "Unable to perform
8634 * command request" and reason code explanation "Invalid Originator
8635 * S_ID" shall be returned. For now, we just unconditionally accept
8636 * RRQ from the target.
8637 **/
8638 static void
8639 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8640 struct lpfc_nodelist *ndlp)
8641 {
8642 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8643 if (vport->phba->sli_rev == LPFC_SLI_REV4)
8644 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
8645 }
8646
8647 /**
8648  * lpfc_els_rsp_rls_acc - Completion callback func for MBX_READ_LNK_STAT mbox cmd
8649 * @phba: pointer to lpfc hba data structure.
8650 * @pmb: pointer to the driver internal queue element for mailbox command.
8651 *
8652 * This routine is the completion callback function for the MBX_READ_LNK_STAT
8653 * mailbox command. This callback function is to actually send the Accept
8654 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
8655 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
8656 * mailbox command, constructs the RLS response with the link statistics
8657 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
8658 * response to the RLS.
8659 *
8660 * Note that the ndlp reference count will be incremented by 1 for holding the
8661 * ndlp and the reference to ndlp will be stored into the ndlp field of
8662 * the IOCB for the completion callback function to the RLS Accept Response
8663 * ELS IOCB command.
8664 *
8665 **/
8666 static void
8667 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8668 {
8669 int rc = 0;
8670 MAILBOX_t *mb;
8671 IOCB_t *icmd;
8672 union lpfc_wqe128 *wqe;
8673 struct RLS_RSP *rls_rsp;
8674 uint8_t *pcmd;
8675 struct lpfc_iocbq *elsiocb;
8676 struct lpfc_nodelist *ndlp;
8677 uint16_t oxid;
8678 uint16_t rxid;
8679 uint32_t cmdsize;
8680 u32 ulp_context;
8681
8682 mb = &pmb->u.mb;
8683
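	/* lpfc_els_rcv_rls() packed the exchange IDs of the RLS into
	 * ctx_buf: rx_id in bits 0-15, ox_id in bits 16-31.
	 */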
8684 ndlp = pmb->ctx_ndlp;
8685 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
8686 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
8687 pmb->ctx_buf = NULL;
8688 pmb->ctx_ndlp = NULL;
8689
8690 if (mb->mbxStatus) {
8691 mempool_free(pmb, phba->mbox_mem_pool);
8692 return;
8693 }
8694
8695 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
8696 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8697 lpfc_max_els_tries, ndlp,
8698 ndlp->nlp_DID, ELS_CMD_ACC);
8699
8700 /* Decrement the ndlp reference count from previous mbox command */
8701 lpfc_nlp_put(ndlp);
8702
8703 if (!elsiocb) {
8704 mempool_free(pmb, phba->mbox_mem_pool);
8705 return;
8706 }
8707
8708 ulp_context = get_job_ulpcontext(phba, elsiocb);
8709 if (phba->sli_rev == LPFC_SLI_REV4) {
8710 wqe = &elsiocb->wqe;
8711 /* Xri / rx_id */
8712 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
8713 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
8714 } else {
8715 icmd = &elsiocb->iocb;
8716 icmd->ulpContext = rxid;
8717 icmd->unsli3.rcvsli3.ox_id = oxid;
8718 }
8719
8720 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8721 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8722 pcmd += sizeof(uint32_t); /* Skip past command */
8723 rls_rsp = (struct RLS_RSP *)pcmd;
8724
8725 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
8726 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
8727 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
8728 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
8729 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
8730 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
8731 mempool_free(pmb, phba->mbox_mem_pool);
8732 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
8733 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8734 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
8735 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
8736 elsiocb->iotag, ulp_context,
8737 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8738 ndlp->nlp_rpi);
8739 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8740 phba->fc_stat.elsXmitACC++;
8741 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8742 if (!elsiocb->ndlp) {
8743 lpfc_els_free_iocb(phba, elsiocb);
8744 return;
8745 }
8746
8747 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8748 if (rc == IOCB_ERROR) {
8749 lpfc_els_free_iocb(phba, elsiocb);
8750 lpfc_nlp_put(ndlp);
8751 }
8752 return;
8753 }
8754
8755 /**
8756 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
8757 * @vport: pointer to a host virtual N_Port data structure.
8758 * @cmdiocb: pointer to lpfc command iocb data structure.
8759 * @ndlp: pointer to a node-list data structure.
8760 *
8761 * This routine processes Read Link Status (RLS) IOCB received as an
8762 * ELS unsolicited event. It first checks the remote port state. If the
8763 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8764  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8765  * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8766  * to read the HBA link statistics. The callback function,
8767  * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
8768  * actually sends out the RLS Accept (ACC) response.
8769 *
8770 * Return codes
8771 * 0 - Successfully processed rls iocb (currently always return 0)
8772 **/
8773 static int
8774 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8775 struct lpfc_nodelist *ndlp)
8776 {
8777 struct lpfc_hba *phba = vport->phba;
8778 LPFC_MBOXQ_t *mbox;
8779 struct ls_rjt stat;
8780 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8781 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8782
8783 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8784 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8785 /* reject the unsolicited RLS request and done with it */
8786 goto reject_out;
8787
8788 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8789 if (mbox) {
8790 lpfc_read_lnk_stat(phba, mbox);
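		/* Stash the exchange IDs in ctx_buf so the mailbox
		 * completion handler can address the ACC to the original
		 * exchange: rx_id in bits 0-15, ox_id in bits 16-31.
		 */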
8791 mbox->ctx_buf = (void *)((unsigned long)
8792 (ox_id << 16 | ctx));
8793 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8794 if (!mbox->ctx_ndlp)
8795 goto node_err;
8796 mbox->vport = vport;
8797 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8798 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8799 != MBX_NOT_FINISHED)
8800 /* Mbox completion will send ELS Response */
8801 return 0;
8802 /* Decrement reference count used for the failed mbox
8803 * command.
8804 */
8805 lpfc_nlp_put(ndlp);
8806 node_err:
8807 mempool_free(mbox, phba->mbox_mem_pool);
8808 }
8809 reject_out:
8810 /* issue rejection response */
8811 stat.un.b.lsRjtRsvd0 = 0;
8812 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8813 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8814 stat.un.b.vendorUnique = 0;
8815 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8816 return 0;
8817 }
8818
8819 /**
8820 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8821 * @vport: pointer to a host virtual N_Port data structure.
8822 * @cmdiocb: pointer to lpfc command iocb data structure.
8823 * @ndlp: pointer to a node-list data structure.
8824 *
8825  * This routine processes a Read Timeout Value (RTV) IOCB received as an
8826  * ELS unsolicited event. It first checks the remote port state. If the
8827  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8828  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8829  * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout
8830 * Value (RTV) unsolicited IOCB event.
8831 *
8832 * Note that the ndlp reference count will be incremented by 1 for holding the
8833 * ndlp and the reference to ndlp will be stored into the ndlp field of
8834 * the IOCB for the completion callback function to the RTV Accept Response
8835 * ELS IOCB command.
8836 *
8837 * Return codes
8838 * 0 - Successfully processed rtv iocb (currently always return 0)
8839 **/
8840 static int
8841 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8842 struct lpfc_nodelist *ndlp)
8843 {
8844 int rc = 0;
8845 IOCB_t *icmd;
8846 union lpfc_wqe128 *wqe;
8847 struct lpfc_hba *phba = vport->phba;
8848 struct ls_rjt stat;
8849 struct RTV_RSP *rtv_rsp;
8850 uint8_t *pcmd;
8851 struct lpfc_iocbq *elsiocb;
8852 uint32_t cmdsize;
8853 u32 ulp_context;
8854
8855 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8856 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8857 /* reject the unsolicited RTV request and done with it */
8858 goto reject_out;
8859
8860 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8861 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8862 lpfc_max_els_tries, ndlp,
8863 ndlp->nlp_DID, ELS_CMD_ACC);
8864
8865 if (!elsiocb)
8866 return 1;
8867
8868 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8869 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8870 pcmd += sizeof(uint32_t); /* Skip past command */
8871
8872 ulp_context = get_job_ulpcontext(phba, elsiocb);
8873 /* use the command's xri in the response */
8874 if (phba->sli_rev == LPFC_SLI_REV4) {
8875 wqe = &elsiocb->wqe;
8876 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8877 get_job_ulpcontext(phba, cmdiocb));
8878 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8879 get_job_rcvoxid(phba, cmdiocb));
8880 } else {
8881 icmd = &elsiocb->iocb;
8882 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8883 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8884 }
8885
8886 rtv_rsp = (struct RTV_RSP *)pcmd;
8887
8888 /* populate RTV payload */
8889 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8890 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8891 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8892 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
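	/* The qtov word was assembled with bf_set() in CPU byte order;
	 * swap the whole word to wire (big-endian) order.
	 */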
8893 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
8894
8895 	/* Xmit ELS RTV ACC response tag <ulpIoTag> */
8896 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8897 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8898 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8899 "Data: x%x x%x x%x\n",
8900 elsiocb->iotag, ulp_context,
8901 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8902 ndlp->nlp_rpi,
8903 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8904 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8905 phba->fc_stat.elsXmitACC++;
8906 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8907 if (!elsiocb->ndlp) {
8908 lpfc_els_free_iocb(phba, elsiocb);
8909 return 0;
8910 }
8911
8912 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8913 if (rc == IOCB_ERROR) {
8914 lpfc_els_free_iocb(phba, elsiocb);
8915 lpfc_nlp_put(ndlp);
8916 }
8917 return 0;
8918
8919 reject_out:
8920 /* issue rejection response */
8921 stat.un.b.lsRjtRsvd0 = 0;
8922 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8923 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8924 stat.un.b.vendorUnique = 0;
8925 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8926 return 0;
8927 }
8928
8929 /* lpfc_issue_els_rrq - Issue an ELS RRQ command to a remote NPort
8930 * @vport: pointer to a host virtual N_Port data structure.
8931 * @ndlp: pointer to a node-list data structure.
8932 * @did: DID of the target.
8933 * @rrq: Pointer to the rrq struct.
8934 *
8935  * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8936 * successful, the completion handler will clear the RRQ.
8937 *
8938 * Return codes
8939 * 0 - Successfully sent rrq els iocb.
8940 * 1 - Failed to send rrq els iocb.
8941 **/
8942 static int
8943 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
8944 uint32_t did, struct lpfc_node_rrq *rrq)
8945 {
8946 struct lpfc_hba *phba = vport->phba;
8947 struct RRQ *els_rrq;
8948 struct lpfc_iocbq *elsiocb;
8949 uint8_t *pcmd;
8950 uint16_t cmdsize;
8951 int ret;
8952
8953 if (!ndlp)
8954 return 1;
8955
8956 /* If ndlp is not NULL, we will bump the reference count on it */
8957 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
8958 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
8959 ELS_CMD_RRQ);
8960 if (!elsiocb)
8961 return 1;
8962
8963 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8964
8965 /* For RRQ request, remainder of payload is Exchange IDs */
8966 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
8967 pcmd += sizeof(uint32_t);
8968 els_rrq = (struct RRQ *) pcmd;
8969
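	/* Translate the driver's internal xritag to the adapter XRI
	 * before placing the exchange IDs in the RRQ payload.
	 */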
8970 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
8971 bf_set(rrq_rxid, els_rrq, rrq->rxid);
8972 bf_set(rrq_did, els_rrq, vport->fc_myDID);
8973 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
8974 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
8975
8976
8977 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8978 "Issue RRQ: did:x%x",
8979 did, rrq->xritag, rrq->rxid);
8980 elsiocb->context_un.rrq = rrq;
8981 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
8982
8983 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8984 if (!elsiocb->ndlp)
8985 goto io_err;
8986
8987 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8988 if (ret == IOCB_ERROR) {
8989 lpfc_nlp_put(ndlp);
8990 goto io_err;
8991 }
8992 return 0;
8993
8994 io_err:
8995 lpfc_els_free_iocb(phba, elsiocb);
8996 return 1;
8997 }
8998
8999 /**
9000 * lpfc_send_rrq - Sends ELS RRQ if needed.
9001 * @phba: pointer to lpfc hba data structure.
9002 * @rrq: pointer to the active rrq.
9003 *
9004 * This routine will call the lpfc_issue_els_rrq if the rrq is
9005 * still active for the xri. If this function returns a failure then
9006 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
9007 *
9008 * Returns 0 Success.
9009 * 1 Failure.
9010 **/
9011 int
9012 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
9013 {
9014 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
9015 rrq->nlp_DID);
9016 if (!ndlp)
9017 return 1;
9018
9019 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
9020 return lpfc_issue_els_rrq(rrq->vport, ndlp,
9021 rrq->nlp_DID, rrq);
9022 else
9023 return 1;
9024 }
9025
9026 /**
9027 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
9028 * @vport: pointer to a host virtual N_Port data structure.
9029 * @cmdsize: size of the ELS command.
9030 * @oldiocb: pointer to the original lpfc command iocb data structure.
9031 * @ndlp: pointer to a node-list data structure.
9032 *
9033  * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
9034 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
9035 *
9036 * Note that the ndlp reference count will be incremented by 1 for holding the
9037 * ndlp and the reference to ndlp will be stored into the ndlp field of
9038 * the IOCB for the completion callback function to the RPL Accept Response
9039 * ELS command.
9040 *
9041 * Return code
9042 * 0 - Successfully issued ACC RPL ELS command
9043 * 1 - Failed to issue ACC RPL ELS command
9044 **/
9045 static int
9046 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
9047 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
9048 {
9049 int rc = 0;
9050 struct lpfc_hba *phba = vport->phba;
9051 IOCB_t *icmd;
9052 union lpfc_wqe128 *wqe;
9053 RPL_RSP rpl_rsp;
9054 struct lpfc_iocbq *elsiocb;
9055 uint8_t *pcmd;
9056 u32 ulp_context;
9057
9058 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
9059 ndlp->nlp_DID, ELS_CMD_ACC);
9060
9061 if (!elsiocb)
9062 return 1;
9063
9064 ulp_context = get_job_ulpcontext(phba, elsiocb);
9065 if (phba->sli_rev == LPFC_SLI_REV4) {
9066 wqe = &elsiocb->wqe;
9067 /* Xri / rx_id */
9068 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
9069 get_job_ulpcontext(phba, oldiocb));
9070 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9071 get_job_rcvoxid(phba, oldiocb));
9072 } else {
9073 icmd = &elsiocb->iocb;
9074 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
9075 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
9076 }
9077
9078 pcmd = elsiocb->cmd_dmabuf->virt;
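	/* The RPL ACC carries a payload length field in the low half of
	 * word 0, so step past the command halfword and write the 16-bit
	 * length ahead of the RPL_RSP data.
	 */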
9079 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
9080 pcmd += sizeof(uint16_t);
9081 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
9082 pcmd += sizeof(uint16_t);
9083
9084 /* Setup the RPL ACC payload */
9085 rpl_rsp.listLen = be32_to_cpu(1);
9086 rpl_rsp.index = 0;
9087 rpl_rsp.port_num_blk.portNum = 0;
9088 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
9089 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
9090 sizeof(struct lpfc_name));
9091 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
9092 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
9093 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9094 "0120 Xmit ELS RPL ACC response tag x%x "
9095 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
9096 "rpi x%x\n",
9097 elsiocb->iotag, ulp_context,
9098 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
9099 ndlp->nlp_rpi);
9100 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
9101 phba->fc_stat.elsXmitACC++;
9102 elsiocb->ndlp = lpfc_nlp_get(ndlp);
9103 if (!elsiocb->ndlp) {
9104 lpfc_els_free_iocb(phba, elsiocb);
9105 return 1;
9106 }
9107
9108 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
9109 if (rc == IOCB_ERROR) {
9110 lpfc_els_free_iocb(phba, elsiocb);
9111 lpfc_nlp_put(ndlp);
9112 return 1;
9113 }
9114
9115 return 0;
9116 }
9117
9118 /**
9119 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
9120 * @vport: pointer to a host virtual N_Port data structure.
9121 * @cmdiocb: pointer to lpfc command iocb data structure.
9122 * @ndlp: pointer to a node-list data structure.
9123 *
9124 * This routine processes Read Port List (RPL) IOCB received as an ELS
9125 * unsolicited event. It first checks the remote port state. If the remote
9126  * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state, it
9127 * invokes the lpfc_els_rsp_reject() routine to send reject response.
9128 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
9129 * to accept the RPL.
9130 *
9131 * Return code
9132 * 0 - Successfully processed rpl iocb (currently always return 0)
9133 **/
9134 static int
9135 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9136 struct lpfc_nodelist *ndlp)
9137 {
9138 struct lpfc_dmabuf *pcmd;
9139 uint32_t *lp;
9140 uint32_t maxsize;
9141 uint16_t cmdsize;
9142 RPL *rpl;
9143 struct ls_rjt stat;
9144
9145 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
9146 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
9147 /* issue rejection response */
9148 stat.un.b.lsRjtRsvd0 = 0;
9149 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
9150 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
9151 stat.un.b.vendorUnique = 0;
9152 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
9153 NULL);
9154 /* rejected the unsolicited RPL request and done with it */
9155 return 0;
9156 }
9157
9158 pcmd = cmdiocb->cmd_dmabuf;
9159 lp = (uint32_t *) pcmd->virt;
9160 rpl = (RPL *) (lp + 1);
9161 maxsize = be32_to_cpu(rpl->maxsize);
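	/* maxsize is the largest response payload, in words, that the
	 * requester can accept; size the ACC accordingly.
	 */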
9162
9163 /* We support only one port */
9164 if ((rpl->index == 0) &&
9165 ((maxsize == 0) ||
9166 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
9167 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
9168 } else {
9169 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
9170 }
9171 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
9172
9173 return 0;
9174 }
9175
9176 /**
9177 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
9178 * @vport: pointer to a virtual N_Port data structure.
9179 * @cmdiocb: pointer to lpfc command iocb data structure.
9180 * @ndlp: pointer to a node-list data structure.
9181 *
9182 * This routine processes Fibre Channel Address Resolution Protocol
9183 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
9184 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
9185 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
9186 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
9187 * remote PortName is compared against the FC PortName stored in the @vport
9188 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
9189 * compared against the FC NodeName stored in the @vport data structure.
9190 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
9191 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
9192 * invoked to send out FARP Response to the remote node. Before sending the
9193  * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
9194 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
9195 * routine is invoked to log into the remote port first.
9196 *
9197 * Return code
9198 * 0 - Either the FARP Match Mode not supported or successfully processed
9199 **/
9200 static int
9201 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9202 struct lpfc_nodelist *ndlp)
9203 {
9204 struct lpfc_dmabuf *pcmd;
9205 uint32_t *lp;
9206 FARP *fp;
9207 uint32_t cnt, did;
9208
9209 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
9210 pcmd = cmdiocb->cmd_dmabuf;
9211 lp = (uint32_t *) pcmd->virt;
9212
9213 lp++;
9214 fp = (FARP *) lp;
9215 /* FARP-REQ received from DID <did> */
9216 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9217 "0601 FARP-REQ received from DID x%x\n", did);
9218 /* We will only support match on WWPN or WWNN */
9219 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
9220 return 0;
9221 }
9222
9223 cnt = 0;
9224 /* If this FARP command is searching for my portname */
9225 if (fp->Mflags & FARP_MATCH_PORT) {
9226 if (memcmp(&fp->RportName, &vport->fc_portname,
9227 sizeof(struct lpfc_name)) == 0)
9228 cnt = 1;
9229 }
9230
9231 /* If this FARP command is searching for my nodename */
9232 if (fp->Mflags & FARP_MATCH_NODE) {
9233 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
9234 sizeof(struct lpfc_name)) == 0)
9235 cnt = 1;
9236 }
9237
9238 if (cnt) {
9239 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
9240 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
9241 /* Log back into the node before sending the FARP. */
9242 if (fp->Rflags & FARP_REQUEST_PLOGI) {
9243 ndlp->nlp_prev_state = ndlp->nlp_state;
9244 lpfc_nlp_set_state(vport, ndlp,
9245 NLP_STE_PLOGI_ISSUE);
9246 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
9247 }
9248
9249 /* Send a FARP response to that node */
9250 if (fp->Rflags & FARP_REQUEST_FARPR)
9251 lpfc_issue_els_farpr(vport, did, 0);
9252 }
9253 }
9254 return 0;
9255 }
9256
9257 /**
9258 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
9259 * @vport: pointer to a host virtual N_Port data structure.
9260 * @cmdiocb: pointer to lpfc command iocb data structure.
9261 * @ndlp: pointer to a node-list data structure.
9262 *
9263 * This routine processes Fibre Channel Address Resolution Protocol
9264 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
9265 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
9266 * the FARP response request.
9267 *
9268 * Return code
9269 * 0 - Successfully processed FARPR IOCB (currently always return 0)
9270 **/
9271 static int
9272 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9273 struct lpfc_nodelist *ndlp)
9274 {
9275 uint32_t did;
9276
9277 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
9278
9279 /* FARP-RSP received from DID <did> */
9280 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9281 "0600 FARP-RSP received from DID x%x\n", did);
9282 /* ACCEPT the Farp resp request */
9283 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
9284
9285 return 0;
9286 }
9287
9288 /**
9289 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
9290 * @vport: pointer to a host virtual N_Port data structure.
9291 * @cmdiocb: pointer to lpfc command iocb data structure.
9292 * @fan_ndlp: pointer to a node-list data structure.
9293 *
9294 * This routine processes a Fabric Address Notification (FAN) IOCB
9295 * command received as an ELS unsolicited event. The FAN ELS command will
9296 * only be processed on a physical port (i.e., the @vport represents the
9297 * physical port). The fabric NodeName and PortName from the FAN IOCB are
9298 * compared against those in the phba data structure. If any of those is
9299 * different, the lpfc_initial_flogi() routine is invoked to initialize
9300  * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
9301 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
9302 * is invoked to register login to the fabric.
9303 *
9304 * Return code
9305 * 0 - Successfully processed fan iocb (currently always return 0).
9306 **/
9307 static int
9308 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9309 struct lpfc_nodelist *fan_ndlp)
9310 {
9311 struct lpfc_hba *phba = vport->phba;
9312 uint32_t *lp;
9313 FAN *fp;
9314
9315 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9316 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9317 fp = (FAN *) ++lp;
9318 /* FAN received; Fan does not have a reply sequence */
9319 if ((vport == phba->pport) &&
9320 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9321 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9322 sizeof(struct lpfc_name))) ||
9323 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9324 sizeof(struct lpfc_name)))) {
9325 /* This port has switched fabrics. FLOGI is required */
9326 lpfc_issue_init_vfi(vport);
9327 } else {
9328 /* FAN verified - skip FLOGI */
9329 vport->fc_myDID = vport->fc_prevDID;
9330 if (phba->sli_rev < LPFC_SLI_REV4)
9331 lpfc_issue_fabric_reglogin(vport);
9332 else {
9333 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9334 "3138 Need register VFI: (x%x/%x)\n",
9335 vport->fc_prevDID, vport->fc_myDID);
9336 lpfc_issue_reg_vfi(vport);
9337 }
9338 }
9339 }
9340 return 0;
9341 }
9342
9343 /**
9344 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9345 * @vport: pointer to a host virtual N_Port data structure.
9346 * @cmdiocb: pointer to lpfc command iocb data structure.
9347 * @ndlp: pointer to a node-list data structure.
9348 *
9349 * Return code
9350  * 0 - Successfully processed EDC iocb (currently always returns 0)
9351 **/
9352 static int
9353 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9354 struct lpfc_nodelist *ndlp)
9355 {
9356 struct lpfc_hba *phba = vport->phba;
9357 struct fc_els_edc *edc_req;
9358 struct fc_tlv_desc *tlv;
9359 uint8_t *payload;
9360 uint32_t *ptr, dtag;
9361 const char *dtag_nm;
9362 int desc_cnt = 0, bytes_remain;
9363 struct fc_diag_lnkflt_desc *plnkflt;
9364
9365 payload = cmdiocb->cmd_dmabuf->virt;
9366
9367 edc_req = (struct fc_els_edc *)payload;
9368 bytes_remain = be32_to_cpu(edc_req->desc_len);
9369
9370 ptr = (uint32_t *)payload;
9371 lpfc_printf_vlog(vport, KERN_INFO,
9372 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9373 "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
9374 bytes_remain, be32_to_cpu(*ptr),
9375 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
9376
9377 /* No signal support unless there is a congestion descriptor */
9378 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9379 phba->cgn_sig_freq = 0;
9380 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
9381
9382 if (bytes_remain <= 0)
9383 goto out;
9384
9385 tlv = edc_req->desc;
9386
9387 /*
9388 * cycle through EDC diagnostic descriptors to find the
9389 * congestion signaling capability descriptor
9390 */
9391 while (bytes_remain) {
9392 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
9393 lpfc_printf_log(phba, KERN_WARNING,
9394 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9395 "6464 Truncated TLV hdr on "
9396 "Diagnostic descriptor[%d]\n",
9397 desc_cnt);
9398 goto out;
9399 }
9400
9401 dtag = be32_to_cpu(tlv->desc_tag);
9402 switch (dtag) {
9403 case ELS_DTAG_LNK_FAULT_CAP:
9404 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9405 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9406 sizeof(struct fc_diag_lnkflt_desc)) {
9407 lpfc_printf_log(phba, KERN_WARNING,
9408 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9409 "6465 Truncated Link Fault Diagnostic "
9410 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9411 desc_cnt, bytes_remain,
9412 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9413 sizeof(struct fc_diag_lnkflt_desc));
9414 goto out;
9415 }
9416 plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
9417 lpfc_printf_log(phba, KERN_INFO,
9418 LOG_ELS | LOG_LDS_EVENT,
9419 "4626 Link Fault Desc Data: x%08x len x%x "
9420 "da x%x dd x%x interval x%x\n",
9421 be32_to_cpu(plnkflt->desc_tag),
9422 be32_to_cpu(plnkflt->desc_len),
9423 be32_to_cpu(
9424 plnkflt->degrade_activate_threshold),
9425 be32_to_cpu(
9426 plnkflt->degrade_deactivate_threshold),
9427 be32_to_cpu(plnkflt->fec_degrade_interval));
9428 break;
9429 case ELS_DTAG_CG_SIGNAL_CAP:
9430 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9431 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9432 sizeof(struct fc_diag_cg_sig_desc)) {
9433 lpfc_printf_log(
9434 phba, KERN_WARNING, LOG_CGN_MGMT,
9435 "6466 Truncated cgn signal Diagnostic "
9436 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9437 desc_cnt, bytes_remain,
9438 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9439 sizeof(struct fc_diag_cg_sig_desc));
9440 goto out;
9441 }
9442
9443 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
9444 phba->cgn_reg_signal = phba->cgn_init_reg_signal;
9445
9446 /* We start negotiation with lpfc_fabric_cgn_frequency.
9447 * When we process the EDC, we will settle on the
9448 * higher frequency.
9449 */
9450 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9451
9452 lpfc_least_capable_settings(
9453 phba, (struct fc_diag_cg_sig_desc *)tlv);
9454 break;
9455 default:
9456 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9457 lpfc_printf_log(phba, KERN_WARNING,
9458 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9459 "6467 unknown Diagnostic "
9460 "Descriptor[%d]: tag x%x (%s)\n",
9461 desc_cnt, dtag, dtag_nm);
9462 }
9463 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9464 tlv = fc_tlv_next_desc(tlv);
9465 desc_cnt++;
9466 }
9467 out:
9468 /* Need to send back an ACC */
9469 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9470
9471 lpfc_config_cgn_signal(phba);
9472 return 0;
9473 }
9474
9475 /**
9476  * lpfc_els_timeout - Handler function for the ELS timer
9477 * @t: timer context used to obtain the vport.
9478 *
9479 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9480 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
9481 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9482 * up the worker thread. It is for the worker thread to invoke the routine
9483 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
9484 **/
9485 void
9486 lpfc_els_timeout(struct timer_list *t)
9487 {
9488 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9489 struct lpfc_hba *phba = vport->phba;
9490 uint32_t tmo_posted;
9491 unsigned long iflag;
9492
9493 spin_lock_irqsave(&vport->work_port_lock, iflag);
9494 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9495 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9496 vport->work_port_events |= WORKER_ELS_TMO;
9497 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9498
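	/* Wake the worker only if we are the ones who posted the event
	 * and the port is not unloading.
	 */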
9499 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9500 lpfc_worker_wake_up(phba);
9501 return;
9502 }
9503
9504
9505 /**
9506 * lpfc_els_timeout_handler - Process an els timeout event
9507 * @vport: pointer to a virtual N_Port data structure.
9508 *
9509 * This routine is the actual handler function that processes an ELS timeout
9510 * event. It walks the ELS ring to get and abort all the IOCBs (except the
9511 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
9512 * invoking the lpfc_sli_issue_abort_iotag() routine.
9513 **/
9514 void
9515 lpfc_els_timeout_handler(struct lpfc_vport *vport)
9516 {
9517 struct lpfc_hba *phba = vport->phba;
9518 struct lpfc_sli_ring *pring;
9519 struct lpfc_iocbq *tmp_iocb, *piocb;
9520 IOCB_t *cmd = NULL;
9521 struct lpfc_dmabuf *pcmd;
9522 uint32_t els_command = 0;
9523 uint32_t timeout;
9524 uint32_t remote_ID = 0xffffffff;
9525 LIST_HEAD(abort_list);
9526 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
9527
9528
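	/* ELS commands are given twice the R_A_TOV to complete before
	 * they are considered timed out.
	 */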
9529 timeout = (uint32_t)(phba->fc_ratov << 1);
9530
9531 pring = lpfc_phba_elsring(phba);
9532 if (unlikely(!pring))
9533 return;
9534
9535 if (phba->pport->load_flag & FC_UNLOADING)
9536 return;
9537
9538 spin_lock_irq(&phba->hbalock);
9539 if (phba->sli_rev == LPFC_SLI_REV4)
9540 spin_lock(&pring->ring_lock);
9541
9542 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9543 ulp_command = get_job_cmnd(phba, piocb);
9544 ulp_context = get_job_ulpcontext(phba, piocb);
9545 did = get_job_els_rsp64_did(phba, piocb);
9546
9547 if (phba->sli_rev == LPFC_SLI_REV4) {
9548 iotag = get_wqe_reqtag(piocb);
9549 } else {
9550 cmd = &piocb->iocb;
9551 iotag = cmd->ulpIoTag;
9552 }
9553
9554 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
9555 ulp_command == CMD_ABORT_XRI_CX ||
9556 ulp_command == CMD_ABORT_XRI_CN ||
9557 ulp_command == CMD_CLOSE_XRI_CN)
9558 continue;
9559
9560 if (piocb->vport != vport)
9561 continue;
9562
9563 pcmd = piocb->cmd_dmabuf;
9564 if (pcmd)
9565 els_command = *(uint32_t *) (pcmd->virt);
9566
9567 if (els_command == ELS_CMD_FARP ||
9568 els_command == ELS_CMD_FARPR ||
9569 els_command == ELS_CMD_FDISC)
9570 continue;
9571
9572 if (piocb->drvrTimeout > 0) {
9573 if (piocb->drvrTimeout >= timeout)
9574 piocb->drvrTimeout -= timeout;
9575 else
9576 piocb->drvrTimeout = 0;
9577 continue;
9578 }
9579
9580 remote_ID = 0xffffffff;
9581 if (ulp_command != CMD_GEN_REQUEST64_CR) {
9582 remote_ID = did;
9583 } else {
9584 struct lpfc_nodelist *ndlp;
9585 ndlp = __lpfc_findnode_rpi(vport, ulp_context);
9586 if (ndlp)
9587 remote_ID = ndlp->nlp_DID;
9588 }
9589 list_add_tail(&piocb->dlist, &abort_list);
9590 }
9591 if (phba->sli_rev == LPFC_SLI_REV4)
9592 spin_unlock(&pring->ring_lock);
9593 spin_unlock_irq(&phba->hbalock);
9594
9595 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9596 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9597 "0127 ELS timeout Data: x%x x%x x%x "
9598 "x%x\n", els_command,
9599 remote_ID, ulp_command, iotag);
9600
9601 spin_lock_irq(&phba->hbalock);
9602 list_del_init(&piocb->dlist);
9603 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9604 spin_unlock_irq(&phba->hbalock);
9605 }
9606
9607 /* Make sure HBA is alive */
9608 lpfc_issue_hb_tmo(phba);
9609
9610 if (!list_empty(&pring->txcmplq))
9611 if (!(phba->pport->load_flag & FC_UNLOADING))
9612 mod_timer(&vport->els_tmofunc,
9613 jiffies + msecs_to_jiffies(1000 * timeout));
9614 }
9615
9616 /**
9617 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
9618 * @vport: pointer to a host virtual N_Port data structure.
9619 *
9620 * This routine is used to clean up all the outstanding ELS commands on a
9621 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
9622 * routine. After that, it walks the ELS transmit queue to remove all the
9623 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
9624 * the IOCBs with a non-NULL completion callback function, the callback
9625 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9626 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
9627 * callback function, the IOCB will simply be released. Finally, it walks
9628 * the ELS transmit completion queue to issue an abort IOCB to any transmit
9629 * completion queue IOCB that is associated with the @vport and is not
9630 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
9631 * part of the discovery state machine) out to HBA by invoking the
9632  * lpfc_sli_issue_abort_iotag() routine. Note that although this function
9633  * issues an abort IOCB for each transmit completion queued IOCB, it does not
9634  * guarantee that the IOCBs are aborted when this function returns.
9635 **/
9636 void
9637 lpfc_els_flush_cmd(struct lpfc_vport *vport)
9638 {
9639 LIST_HEAD(abort_list);
9640 LIST_HEAD(cancel_list);
9641 struct lpfc_hba *phba = vport->phba;
9642 struct lpfc_sli_ring *pring;
9643 struct lpfc_iocbq *tmp_iocb, *piocb;
9644 u32 ulp_command;
9645 unsigned long iflags = 0;
9646 bool mbx_tmo_err;
9647
9648 lpfc_fabric_abort_vport(vport);
9649
9650 /*
9651 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
9652 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
9653 * ultimately grabs the ring_lock, the driver must splice the list into
9654 * a working list and release the locks before calling the abort.
9655 */
9656 spin_lock_irqsave(&phba->hbalock, iflags);
9657 pring = lpfc_phba_elsring(phba);
9658
9659 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
9660 if (unlikely(!pring)) {
9661 spin_unlock_irqrestore(&phba->hbalock, iflags);
9662 return;
9663 }
9664
9665 if (phba->sli_rev == LPFC_SLI_REV4)
9666 spin_lock(&pring->ring_lock);
9667
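	/* If a mailbox command has timed out, the adapter cannot service
	 * abort requests; such IOCBs are cancelled directly below instead
	 * of being aborted.
	 */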
9668 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
9669 /* First we need to issue aborts to outstanding cmds on txcmpl */
9670 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9671 if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
9672 continue;
9673
9674 if (piocb->vport != vport)
9675 continue;
9676
9677 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
9678 continue;
9679
9680 /* On the ELS ring we can have ELS_REQUESTs or
9681 * GEN_REQUESTs waiting for a response.
9682 */
9683 ulp_command = get_job_cmnd(phba, piocb);
9684 if (ulp_command == CMD_ELS_REQUEST64_CR) {
9685 list_add_tail(&piocb->dlist, &abort_list);
9686
9687 /* If the link is down when flushing ELS commands
9688 * the firmware will not complete them till after
9689 * the link comes back up. This may confuse
9690 * discovery for the new link up, so we need to
9691 * change the compl routine to just clean up the iocb
9692 * and avoid any retry logic.
9693 */
9694 if (phba->link_state == LPFC_LINK_DOWN)
9695 piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
9696 } else if (ulp_command == CMD_GEN_REQUEST64_CR ||
9697 mbx_tmo_err)
9698 list_add_tail(&piocb->dlist, &abort_list);
9699 }
9700
9701 if (phba->sli_rev == LPFC_SLI_REV4)
9702 spin_unlock(&pring->ring_lock);
9703 spin_unlock_irqrestore(&phba->hbalock, iflags);
9704
9705 /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
9706 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9707 spin_lock_irqsave(&phba->hbalock, iflags);
9708 list_del_init(&piocb->dlist);
9709 if (mbx_tmo_err)
9710 list_move_tail(&piocb->list, &cancel_list);
9711 else
9712 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9713
9714 spin_unlock_irqrestore(&phba->hbalock, iflags);
9715 }
9716 if (!list_empty(&cancel_list))
9717 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
9718 IOERR_SLI_ABORTED);
9719 else
9720 /* Make sure HBA is alive */
9721 lpfc_issue_hb_tmo(phba);
9722
9723 if (!list_empty(&abort_list))
9724 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9725 "3387 abort list for txq not empty\n");
9726 INIT_LIST_HEAD(&abort_list);
9727
9728 spin_lock_irqsave(&phba->hbalock, iflags);
9729 if (phba->sli_rev == LPFC_SLI_REV4)
9730 spin_lock(&pring->ring_lock);
9731
9732 /* No need to abort the txq list,
9733 * just queue them up for lpfc_sli_cancel_iocbs
9734 */
9735 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
9736 ulp_command = get_job_cmnd(phba, piocb);
9737
9738 if (piocb->cmd_flag & LPFC_IO_LIBDFC)
9739 continue;
9740
9741 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
9742 if (ulp_command == CMD_QUE_RING_BUF_CN ||
9743 ulp_command == CMD_QUE_RING_BUF64_CN ||
9744 ulp_command == CMD_CLOSE_XRI_CN ||
9745 ulp_command == CMD_ABORT_XRI_CN ||
9746 ulp_command == CMD_ABORT_XRI_CX)
9747 continue;
9748
9749 if (piocb->vport != vport)
9750 continue;
9751
9752 list_del_init(&piocb->list);
9753 list_add_tail(&piocb->list, &abort_list);
9754 }
9755
9756 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
9757 if (vport == phba->pport) {
9758 list_for_each_entry_safe(piocb, tmp_iocb,
9759 &phba->fabric_iocb_list, list) {
9760 list_del_init(&piocb->list);
9761 list_add_tail(&piocb->list, &abort_list);
9762 }
9763 }
9764
9765 if (phba->sli_rev == LPFC_SLI_REV4)
9766 spin_unlock(&pring->ring_lock);
9767 spin_unlock_irqrestore(&phba->hbalock, iflags);
9768
9769 /* Cancel all the IOCBs from the completions list */
9770 lpfc_sli_cancel_iocbs(phba, &abort_list,
9771 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
9772
9773 return;
9774 }
9775
9776 /**
9777 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
9778 * @phba: pointer to lpfc hba data structure.
9779 *
9780 * This routine is used to clean up all the outstanding ELS commands on a
9781 * @phba. It walks the HBA's port list and invokes lpfc_els_flush_cmd() on
9782 * each vport. That routine walks the vport's ELS transmit queue to remove
9783 * all the IOCBs other than the QUE_RING and ABORT/CLOSE IOCBs. For
9784 * the IOCBs with a completion callback function associated, the callback
9785 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9786 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without a completion
9787 * callback function associated, the IOCB will simply be released. Finally,
9788 * it walks the ELS transmit completion queue to issue an abort IOCB for any
9789 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
9790 * management plane IOCBs that are not part of the discovery state machine)
9791 * out to the HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
9792 **/
9793 void
9794 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
9795 {
9796 struct lpfc_vport *vport;
9797
9798 spin_lock_irq(&phba->port_list_lock);
9799 list_for_each_entry(vport, &phba->port_list, listentry)
9800 lpfc_els_flush_cmd(vport);
9801 spin_unlock_irq(&phba->port_list_lock);
9802
9803 return;
9804 }
9805
9806 /**
9807 * lpfc_send_els_failure_event - Posts an ELS command failure event
9808 * @phba: Pointer to hba context object.
9809 * @cmdiocbp: Pointer to command iocb which reported error.
9810 * @rspiocbp: Pointer to response iocb which reported error.
9811 *
9812 * This function sends an event when there is an ELS command
9813 * failure.
9814 **/
9815 void
9816 lpfc_send_els_failure_event(struct lpfc_hba *phba,
9817 struct lpfc_iocbq *cmdiocbp,
9818 struct lpfc_iocbq *rspiocbp)
9819 {
9820 struct lpfc_vport *vport = cmdiocbp->vport;
9821 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9822 struct lpfc_lsrjt_event lsrjt_event;
9823 struct lpfc_fabric_event_header fabric_event;
9824 struct ls_rjt stat;
9825 struct lpfc_nodelist *ndlp;
9826 uint32_t *pcmd;
9827 u32 ulp_status, ulp_word4;
9828
9829 ndlp = cmdiocbp->ndlp;
9830 if (!ndlp)
9831 return;
9832
9833 ulp_status = get_job_ulpstatus(phba, rspiocbp);
9834 ulp_word4 = get_job_word4(phba, rspiocbp);
9835
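	/* For an LS_RJT completion, word4 carries the reject reason and
	 * explanation codes; decode them and post an LSRJT vendor-unique
	 * event.
	 */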
9836 if (ulp_status == IOSTAT_LS_RJT) {
9837 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
9838 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
9839 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
9840 sizeof(struct lpfc_name));
9841 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
9842 sizeof(struct lpfc_name));
9843 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
9844 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
9845 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
9846 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
9847 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
9848 fc_host_post_vendor_event(shost,
9849 fc_get_event_number(),
9850 sizeof(lsrjt_event),
9851 (char *)&lsrjt_event,
9852 LPFC_NL_VENDOR_ID);
9853 return;
9854 }
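	/* NPort busy and fabric busy completions are reported as fabric
	 * events rather than LS_RJT events.
	 */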
9855 if (ulp_status == IOSTAT_NPORT_BSY ||
9856 ulp_status == IOSTAT_FABRIC_BSY) {
9857 fabric_event.event_type = FC_REG_FABRIC_EVENT;
9858 if (ulp_status == IOSTAT_NPORT_BSY)
9859 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
9860 else
9861 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
9862 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
9863 sizeof(struct lpfc_name));
9864 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
9865 sizeof(struct lpfc_name));
9866 fc_host_post_vendor_event(shost,
9867 fc_get_event_number(),
9868 sizeof(fabric_event),
9869 (char *)&fabric_event,
9870 LPFC_NL_VENDOR_ID);
9871 return;
9872 }
9873
9874 }
9875
9876 /**
9877 * lpfc_send_els_event - Posts unsolicited els event
9878 * @vport: Pointer to vport object.
9879 * @ndlp: Pointer FC node object.
9880 * @payload: ELS command code type.
9881 *
9882 * This function posts an event when there is an incoming
9883 * unsolicited ELS command.
9884 **/
9885 static void
9886 lpfc_send_els_event(struct lpfc_vport *vport,
9887 struct lpfc_nodelist *ndlp,
9888 uint32_t *payload)
9889 {
9890 struct lpfc_els_event_header *els_data = NULL;
9891 struct lpfc_logo_event *logo_data = NULL;
9892 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9893
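	/* A LOGO carries the remote port's WWPN in its payload, so it uses
	 * the larger lpfc_logo_event structure.
	 */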
9894 if (*payload == ELS_CMD_LOGO) {
9895 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
9896 if (!logo_data) {
9897 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9898 "0148 Failed to allocate memory "
9899 "for LOGO event\n");
9900 return;
9901 }
9902 els_data = &logo_data->header;
9903 } else {
9904 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
9905 GFP_KERNEL);
9906 if (!els_data) {
9907 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9908 "0149 Failed to allocate memory "
9909 "for ELS event\n");
9910 return;
9911 }
9912 }
9913 els_data->event_type = FC_REG_ELS_EVENT;
9914 switch (*payload) {
9915 case ELS_CMD_PLOGI:
9916 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
9917 break;
9918 case ELS_CMD_PRLO:
9919 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
9920 break;
9921 case ELS_CMD_ADISC:
9922 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
9923 break;
9924 case ELS_CMD_LOGO:
9925 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
9926 /* Copy the WWPN in the LOGO payload */
9927 memcpy(logo_data->logo_wwpn, &payload[2],
9928 sizeof(struct lpfc_name));
9929 break;
9930 default:
9931 kfree(els_data);
9932 return;
9933 }
9934 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
9935 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
9936 if (*payload == ELS_CMD_LOGO) {
9937 fc_host_post_vendor_event(shost,
9938 fc_get_event_number(),
9939 sizeof(struct lpfc_logo_event),
9940 (char *)logo_data,
9941 LPFC_NL_VENDOR_ID);
9942 kfree(logo_data);
9943 } else {
9944 fc_host_post_vendor_event(shost,
9945 fc_get_event_number(),
9946 sizeof(struct lpfc_els_event_header),
9947 (char *)els_data,
9948 LPFC_NL_VENDOR_ID);
9949 kfree(els_data);
9950 }
9951
9952 return;
9953 }
9954
9955
9956 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
9957 FC_FPIN_LI_EVT_TYPES_INIT);
9958
9959 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types,
9960 FC_FPIN_DELI_EVT_TYPES_INIT);
9961
9962 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types,
9963 FC_FPIN_CONGN_EVT_TYPES_INIT);
9964
9965 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm,
9966 fc_fpin_congn_severity_types,
9967 FC_FPIN_CONGN_SEVERITY_INIT);
9968
9969
9970 /**
9971 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port
9972 * @phba: Pointer to phba object.
9973 * @wwnlist: Pointer to list of WWPNs in FPIN payload
9974 * @cnt: count of WWPNs in FPIN payload
9975 *
9976 * This routine is called for LI and PC descriptors.
9977 * Display is limited to 6 log messages, with up to 6 WWPNs per message.
9978 */
9979 static void
9980 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
9981 {
9982 char buf[LPFC_FPIN_WWPN_LINE_SZ];
9983 __be64 wwn;
9984 u64 wwpn;
9985 int i, len;
9986 int line = 0;
9987 int wcnt = 0;
9988 bool endit = false;
9989
9990 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:");
9991 for (i = 0; i < cnt; i++) {
9992 /* Are we on the last WWPN */
9993 if (i == (cnt - 1))
9994 endit = true;
9995
9996 /* Extract the next WWPN from the payload */
9997 wwn = *wwnlist++;
9998 wwpn = be64_to_cpu(wwn);
9999 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
10000 " %016llx", wwpn);
10001
10002 /* Log a message if we are on the last WWPN
10003 * or if we hit the max allowed per message.
10004 */
10005 wcnt++;
10006 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) {
10007 buf[len] = 0;
10008 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10009 "4686 %s\n", buf);
10010
10011 /* Check if we reached the last WWPN */
10012 if (endit)
10013 return;
10014
10015 			/* Limit the number of log messages displayed per FPIN */
10016 line++;
10017 if (line == LPFC_FPIN_WWPN_NUM_LINE) {
10018 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10019 "4687 %d WWPNs Truncated\n",
10020 cnt - i - 1);
10021 return;
10022 }
10023
10024 /* Start over with next log message */
10025 wcnt = 0;
10026 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ,
10027 "Additional WWPNs:");
10028 }
10029 }
10030 }
10031
10032 /**
10033 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
10034 * @phba: Pointer to phba object.
10035 * @tlv: Pointer to the Link Integrity Notification Descriptor.
10036 *
10037 * This function processes a Link Integrity FPIN event by logging a message.
10038 **/
10039 static void
10040 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10041 {
10042 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
10043 const char *li_evt_str;
10044 u32 li_evt, cnt;
10045
10046 li_evt = be16_to_cpu(li->event_type);
10047 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
10048 cnt = be32_to_cpu(li->pname_count);
10049
10050 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10051 "4680 FPIN Link Integrity %s (x%x) "
10052 "Detecting PN x%016llx Attached PN x%016llx "
10053 "Duration %d mSecs Count %d Port Cnt %d\n",
10054 li_evt_str, li_evt,
10055 be64_to_cpu(li->detecting_wwpn),
10056 be64_to_cpu(li->attached_wwpn),
10057 be32_to_cpu(li->event_threshold),
10058 be32_to_cpu(li->event_count), cnt);
10059
10060 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt);
10061 }
10062
10063 /**
10064 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
10065 * @phba: Pointer to hba object.
10066 * @tlv: Pointer to the Delivery Notification Descriptor TLV
10067 *
10068 * This function processes a Delivery FPIN event by logging a message.
10069 **/
10070 static void
10071 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10072 {
10073 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
10074 const char *del_rsn_str;
10075 u32 del_rsn;
10076 __be32 *frame;
10077
10078 del_rsn = be16_to_cpu(del->deli_reason_code);
10079 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
10080
10081 /* Skip over desc_tag/desc_len header to payload */
10082 frame = (__be32 *)(del + 1);
10083
10084 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10085 "4681 FPIN Delivery %s (x%x) "
10086 "Detecting PN x%016llx Attached PN x%016llx "
10087 "DiscHdr0 x%08x "
10088 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
10089 "DiscHdr4 x%08x DiscHdr5 x%08x\n",
10090 del_rsn_str, del_rsn,
10091 be64_to_cpu(del->detecting_wwpn),
10092 be64_to_cpu(del->attached_wwpn),
10093 be32_to_cpu(frame[0]),
10094 be32_to_cpu(frame[1]),
10095 be32_to_cpu(frame[2]),
10096 be32_to_cpu(frame[3]),
10097 be32_to_cpu(frame[4]),
10098 be32_to_cpu(frame[5]));
10099 }
10100
10101 /**
10102 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event.
10103 * @phba: Pointer to hba object.
10104 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV
10105 *
10106 * This function processes a Peer Congestion FPIN event by logging a message.
10107 **/
10108 static void
10109 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10110 {
10111 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
10112 const char *pc_evt_str;
10113 u32 pc_evt, cnt;
10114
10115 pc_evt = be16_to_cpu(pc->event_type);
10116 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
10117 cnt = be32_to_cpu(pc->pname_count);
10118
10119 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
10120 "4684 FPIN Peer Congestion %s (x%x) "
10121 "Duration %d mSecs "
10122 "Detecting PN x%016llx Attached PN x%016llx "
10123 "Impacted Port Cnt %d\n",
10124 pc_evt_str, pc_evt,
10125 be32_to_cpu(pc->event_period),
10126 be64_to_cpu(pc->detecting_wwpn),
10127 be64_to_cpu(pc->attached_wwpn),
10128 cnt);
10129
10130 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
10131 }
10132
10133 /**
10134 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
10135 * @phba: Pointer to hba object.
10136 * @tlv: Pointer to the Congestion Notification Descriptor TLV
10137 *
10138 * This function processes an FPIN Congestion Notification. The notification
10139 * may be an Alarm or a Warning. This routine feeds that data into the
10140 * driver's running congestion algorithm. It also processes the FPIN by
10141 * logging a message. It returns 1 if the message should be delivered to
10142 * the upper layer, or 0 if it should not be.
10143 **/
10144 static int
10145 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10146 {
10147 struct lpfc_cgn_info *cp;
10148 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv;
10149 const char *cgn_evt_str;
10150 u32 cgn_evt;
10151 const char *cgn_sev_str;
10152 u32 cgn_sev;
10153 uint16_t value;
10154 u32 crc;
10155 bool nm_log = false;
10156 int rc = 1;
10157
10158 cgn_evt = be16_to_cpu(cgn->event_type);
10159 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt);
10160 cgn_sev = cgn->severity;
10161 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev);
10162
10163 /* The driver only takes action on a Credit Stall or Oversubscription
10164 * event type to engage the IO algorithm. The driver prints an
10165 * unmaskable message only for Lost Credit and Credit Stall.
10166 * TODO: Still need to have definition of host action on clear,
10167 * lost credit and device specific event types.
10168 */
10169 switch (cgn_evt) {
10170 case FPIN_CONGN_LOST_CREDIT:
10171 nm_log = true;
10172 break;
10173 case FPIN_CONGN_CREDIT_STALL:
10174 nm_log = true;
10175 fallthrough;
10176 case FPIN_CONGN_OVERSUBSCRIPTION:
10177 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION)
10178 nm_log = false;
10179 switch (cgn_sev) {
10180 case FPIN_CONGN_SEVERITY_ERROR:
10181 /* Take action here for an Alarm event */
10182 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
10183 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
10184 					/* Keep track of the alarm cnt for SYNC_WQE */
10185 atomic_inc(&phba->cgn_sync_alarm_cnt);
10186 }
10187 /* Track alarm cnt for cgn_info regardless
10188 * of whether CMF is configured for Signals
10189 * or FPINs.
10190 */
10191 atomic_inc(&phba->cgn_fabric_alarm_cnt);
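				/* Fall into the common cleanup below to record
				 * the FPIN frequency and refresh the congestion
				 * info CRC.
				 */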
10192 goto cleanup;
10193 }
10194 break;
10195 case FPIN_CONGN_SEVERITY_WARNING:
10196 /* Take action here for a Warning event */
10197 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
10198 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
10199 					/* Keep track of the warning cnt for SYNC_WQE */
10200 atomic_inc(&phba->cgn_sync_warn_cnt);
10201 }
10202 /* Track warning cnt and freq for cgn_info
10203 * regardless of whether CMF is configured for
10204 * Signals or FPINs.
10205 */
10206 atomic_inc(&phba->cgn_fabric_warn_cnt);
10207 cleanup:
10208 /* Save frequency in ms */
10209 phba->cgn_fpin_frequency =
10210 be32_to_cpu(cgn->event_period);
10211 value = phba->cgn_fpin_frequency;
10212 if (phba->cgn_i) {
10213 cp = (struct lpfc_cgn_info *)
10214 phba->cgn_i->virt;
10215 cp->cgn_alarm_freq =
10216 cpu_to_le16(value);
10217 cp->cgn_warn_freq =
10218 cpu_to_le16(value);
10219 crc = lpfc_cgn_calc_crc32
10220 (cp,
10221 LPFC_CGN_INFO_SZ,
10222 LPFC_CGN_CRC32_SEED);
10223 cp->cgn_info_crc = cpu_to_le32(crc);
10224 }
10225
10226 /* Don't deliver to upper layer since
10227 * driver took action on this tlv.
10228 */
10229 rc = 0;
10230 }
10231 break;
10232 }
10233 break;
10234 }
10235
10236 /* Change the log level to unmaskable for the following event types. */
10237 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO),
10238 LOG_CGN_MGMT | LOG_ELS,
10239 "4683 FPIN CONGESTION %s type %s (x%x) Event "
10240 "Duration %d mSecs\n",
10241 cgn_sev_str, cgn_evt_str, cgn_evt,
10242 be32_to_cpu(cgn->event_period));
10243 return rc;
10244 }
10245
10246 void
10247 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
10248 {
10249 struct lpfc_hba *phba = vport->phba;
10250 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p;
10251 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv;
10252 const char *dtag_nm;
10253 int desc_cnt = 0, bytes_remain, cnt;
10254 u32 dtag, deliver = 0;
10255 int len;
10256
10257 /* FPINs handled only if we are in the right discovery state */
10258 if (vport->port_state < LPFC_DISC_AUTH)
10259 return;
10260
10261 /* make sure there is the full fpin header */
10262 if (fpin_length < sizeof(struct fc_els_fpin))
10263 return;
10264
10265 /* Sanity check descriptor length. The desc_len value does not
10266 * include space for the ELS command and the desc_len fields.
10267 */
10268 len = be32_to_cpu(fpin->desc_len);
10269 if (fpin_length < len + sizeof(struct fc_els_fpin)) {
10270 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10271 "4671 Bad ELS FPIN length %d: %d\n",
10272 len, fpin_length);
10273 return;
10274 }
10275
10276 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
10277 first_tlv = tlv;
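	/* Bound the descriptor walk by both the received frame length and
	 * the descriptor list length advertised in the FPIN header.
	 */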
10278 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
10279 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
10280
10281 /* process each descriptor separately */
10282 while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
10283 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
10284 dtag = be32_to_cpu(tlv->desc_tag);
10285 switch (dtag) {
10286 case ELS_DTAG_LNK_INTEGRITY:
10287 lpfc_els_rcv_fpin_li(phba, tlv);
10288 deliver = 1;
10289 break;
10290 case ELS_DTAG_DELIVERY:
10291 lpfc_els_rcv_fpin_del(phba, tlv);
10292 deliver = 1;
10293 break;
10294 case ELS_DTAG_PEER_CONGEST:
10295 lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
10296 deliver = 1;
10297 break;
10298 case ELS_DTAG_CONGESTION:
10299 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
10300 break;
10301 default:
10302 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10303 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10304 "4678 unknown FPIN descriptor[%d]: "
10305 "tag x%x (%s)\n",
10306 desc_cnt, dtag, dtag_nm);
10307
10308 /* If descriptor is bad, drop the rest of the data */
10309 return;
10310 }
10311 lpfc_cgn_update_stat(phba, dtag);
10312 cnt = be32_to_cpu(tlv->desc_len);
10313
10314 /* Sanity check descriptor length. The desc_len value does not
10315 * include space for the desc_tag and the desc_len fields.
10316 */
10317 len -= (cnt + sizeof(struct fc_tlv_desc));
10318 if (len < 0) {
10319 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10320 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10321 "4672 Bad FPIN descriptor TLV length "
10322 "%d: %d %d %s\n",
10323 cnt, len, fpin_length, dtag_nm);
10324 return;
10325 }
10326
10327 current_tlv = tlv;
10328 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
10329 tlv = fc_tlv_next_desc(tlv);
10330
10331 /* Format payload such that the FPIN delivered to the
10332 * upper layer is a single descriptor FPIN.
10333 */
10334 if (desc_cnt)
10335 memcpy(first_tlv, current_tlv,
10336 (cnt + sizeof(struct fc_els_fpin)));
10337
10338 /* Adjust the length so that it only reflects a
10339 * single descriptor FPIN.
10340 */
10341 fpin_length = cnt + sizeof(struct fc_els_fpin);
10342 fpin->desc_len = cpu_to_be32(fpin_length);
10343 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */
10344
10345 /* Send every descriptor individually to the upper layer */
10346 if (deliver)
10347 fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
10348 fpin_length, (char *)fpin, 0);
10349 desc_cnt++;
10350 }
10351 }
10352
10353 /**
10354 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
10355 * @phba: pointer to lpfc hba data structure.
10356 * @pring: pointer to a SLI ring.
10357 * @vport: pointer to a host virtual N_Port data structure.
10358 * @elsiocb: pointer to lpfc els command iocb data structure.
10359 *
10360 * This routine is used for processing the IOCB associated with an unsolicited
10361 * event. It first determines whether there is an existing ndlp that matches
10362 * the DID from the unsolicited IOCB. If not, it will create a new one with
10363 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
10364 * IOCB is then used to invoke the proper routine and to set up proper state
10365 * of the discovery state machine.
10366 **/
10367 static void
10368 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10369 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
10370 {
10371 struct lpfc_nodelist *ndlp;
10372 struct ls_rjt stat;
10373 u32 *payload, payload_len;
10374 u32 cmd = 0, did = 0, newnode, status = 0;
10375 uint8_t rjt_exp, rjt_err = 0, init_link = 0;
10376 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10377 LPFC_MBOXQ_t *mbox;
10378
10379 if (!vport || !elsiocb->cmd_dmabuf)
10380 goto dropit;
10381
10382 newnode = 0;
10383 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10384 payload = elsiocb->cmd_dmabuf->virt;
10385 if (phba->sli_rev == LPFC_SLI_REV4)
10386 payload_len = wcqe_cmpl->total_data_placed;
10387 else
10388 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
10389 status = get_job_ulpstatus(phba, elsiocb);
10390 cmd = *payload;
10391 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
10392 lpfc_sli3_post_buffer(phba, pring, 1);
10393
10394 did = get_job_els_rsp64_did(phba, elsiocb);
10395 if (status) {
10396 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10397 "RCV Unsol ELS: status:x%x/x%x did:x%x",
10398 status, get_job_word4(phba, elsiocb), did);
10399 goto dropit;
10400 }
10401
10402 /* Check to see if link went down during discovery */
10403 if (lpfc_els_chk_latt(vport))
10404 goto dropit;
10405
10406 /* Ignore traffic received during vport shutdown. */
10407 if (vport->load_flag & FC_UNLOADING)
10408 goto dropit;
10409
10410 /* If NPort discovery is delayed drop incoming ELS */
10411 if ((vport->fc_flag & FC_DISC_DELAYED) &&
10412 (cmd != ELS_CMD_PLOGI))
10413 goto dropit;
10414
10415 ndlp = lpfc_findnode_did(vport, did);
10416 if (!ndlp) {
10417 /* Cannot find existing Fabric ndlp, so allocate a new one */
10418 ndlp = lpfc_nlp_init(vport, did);
10419 if (!ndlp)
10420 goto dropit;
10421 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10422 newnode = 1;
10423 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
10424 ndlp->nlp_type |= NLP_FABRIC;
10425 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
10426 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10427 newnode = 1;
10428 }
10429
10430 phba->fc_stat.elsRcvFrame++;
10431
10432 /*
10433 * Do not process any unsolicited ELS commands
10434 * if the ndlp is in DEV_LOSS
10435 */
10436 spin_lock_irq(&ndlp->lock);
10437 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
10438 spin_unlock_irq(&ndlp->lock);
10439 if (newnode)
10440 lpfc_nlp_put(ndlp);
10441 goto dropit;
10442 }
10443 spin_unlock_irq(&ndlp->lock);
10444
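	/* Hold a node reference for this unsolicited exchange; it is
	 * released near the end of this routine.
	 */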
10445 elsiocb->ndlp = lpfc_nlp_get(ndlp);
10446 if (!elsiocb->ndlp)
10447 goto dropit;
10448 elsiocb->vport = vport;
10449
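	/* An RSCN command word also encodes the RSCN payload length; mask it
	 * off so the switch below can match ELS_CMD_RSCN.
	 */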
10450 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
10451 cmd &= ELS_CMD_MASK;
10452 }
10453 /* ELS command <elsCmd> received from NPORT <did> */
10454 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10455 "0112 ELS command x%x received from NPORT x%x "
10456 "refcnt %d Data: x%x x%x x%x x%x\n",
10457 cmd, did, kref_read(&ndlp->kref), vport->port_state,
10458 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
10459
10460 	/* Reject until our FLOGI completes, or until a PT2PT PLOGI has assigned a DID */
10461 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
10462 (cmd != ELS_CMD_FLOGI) &&
10463 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
10464 rjt_err = LSRJT_LOGICAL_BSY;
10465 rjt_exp = LSEXP_NOTHING_MORE;
10466 goto lsrjt;
10467 }
10468
10469 switch (cmd) {
10470 case ELS_CMD_PLOGI:
10471 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10472 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
10473 did, vport->port_state, ndlp->nlp_flag);
10474
10475 phba->fc_stat.elsRcvPLOGI++;
10476 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
10477 if (phba->sli_rev == LPFC_SLI_REV4 &&
10478 (phba->pport->fc_flag & FC_PT2PT)) {
10479 vport->fc_prevDID = vport->fc_myDID;
10480 /* Our DID needs to be updated before registering
10481 * the vfi. This is done in lpfc_rcv_plogi but
10482 * that is called after the reg_vfi.
10483 */
10484 vport->fc_myDID =
10485 bf_get(els_rsp64_sid,
10486 &elsiocb->wqe.xmit_els_rsp);
10487 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10488 "3312 Remote port assigned DID x%x "
10489 "%x\n", vport->fc_myDID,
10490 vport->fc_prevDID);
10491 }
10492
10493 lpfc_send_els_event(vport, ndlp, payload);
10494
10495 /* If Nport discovery is delayed, reject PLOGIs */
10496 if (vport->fc_flag & FC_DISC_DELAYED) {
10497 rjt_err = LSRJT_UNABLE_TPC;
10498 rjt_exp = LSEXP_NOTHING_MORE;
10499 break;
10500 }
10501
10502 if (vport->port_state < LPFC_DISC_AUTH) {
10503 if (!(phba->pport->fc_flag & FC_PT2PT) ||
10504 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
10505 rjt_err = LSRJT_UNABLE_TPC;
10506 rjt_exp = LSEXP_NOTHING_MORE;
10507 break;
10508 }
10509 }
10510
10511 spin_lock_irq(&ndlp->lock);
10512 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
10513 spin_unlock_irq(&ndlp->lock);
10514
10515 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10516 NLP_EVT_RCV_PLOGI);
10517
10518 break;
10519 case ELS_CMD_FLOGI:
10520 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10521 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
10522 did, vport->port_state, ndlp->nlp_flag);
10523
10524 phba->fc_stat.elsRcvFLOGI++;
10525
10526 /* If the driver believes fabric discovery is done and is ready,
10527 		 * bounce the link. There is some discrepancy.
10528 */
10529 if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
10530 vport->fc_flag & FC_PT2PT &&
10531 vport->rcv_flogi_cnt >= 1) {
10532 rjt_err = LSRJT_LOGICAL_BSY;
10533 rjt_exp = LSEXP_NOTHING_MORE;
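			/* Reject this FLOGI now; init_link causes the link to
			 * be re-initialized at the bottom of this routine,
			 * after the LS_RJT has been sent.
			 */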
10534 init_link++;
10535 goto lsrjt;
10536 }
10537
10538 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
10539 /* retain node if our response is deferred */
10540 if (phba->defer_flogi_acc_flag)
10541 break;
10542 if (newnode)
10543 lpfc_disc_state_machine(vport, ndlp, NULL,
10544 NLP_EVT_DEVICE_RM);
10545 break;
10546 case ELS_CMD_LOGO:
10547 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10548 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
10549 did, vport->port_state, ndlp->nlp_flag);
10550
10551 phba->fc_stat.elsRcvLOGO++;
10552 lpfc_send_els_event(vport, ndlp, payload);
10553 if (vport->port_state < LPFC_DISC_AUTH) {
10554 rjt_err = LSRJT_UNABLE_TPC;
10555 rjt_exp = LSEXP_NOTHING_MORE;
10556 break;
10557 }
10558 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
10559 if (newnode)
10560 lpfc_disc_state_machine(vport, ndlp, NULL,
10561 NLP_EVT_DEVICE_RM);
10562 break;
10563 case ELS_CMD_PRLO:
10564 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10565 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
10566 did, vport->port_state, ndlp->nlp_flag);
10567
10568 phba->fc_stat.elsRcvPRLO++;
10569 lpfc_send_els_event(vport, ndlp, payload);
10570 if (vport->port_state < LPFC_DISC_AUTH) {
10571 rjt_err = LSRJT_UNABLE_TPC;
10572 rjt_exp = LSEXP_NOTHING_MORE;
10573 break;
10574 }
10575 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
10576 break;
10577 case ELS_CMD_LCB:
10578 phba->fc_stat.elsRcvLCB++;
10579 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
10580 break;
10581 case ELS_CMD_RDP:
10582 phba->fc_stat.elsRcvRDP++;
10583 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
10584 break;
10585 case ELS_CMD_RSCN:
10586 phba->fc_stat.elsRcvRSCN++;
10587 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
10588 if (newnode)
10589 lpfc_disc_state_machine(vport, ndlp, NULL,
10590 NLP_EVT_DEVICE_RM);
10591 break;
10592 case ELS_CMD_ADISC:
10593 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10594 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
10595 did, vport->port_state, ndlp->nlp_flag);
10596
10597 lpfc_send_els_event(vport, ndlp, payload);
10598 phba->fc_stat.elsRcvADISC++;
10599 if (vport->port_state < LPFC_DISC_AUTH) {
10600 rjt_err = LSRJT_UNABLE_TPC;
10601 rjt_exp = LSEXP_NOTHING_MORE;
10602 break;
10603 }
10604 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10605 NLP_EVT_RCV_ADISC);
10606 break;
10607 case ELS_CMD_PDISC:
10608 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10609 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
10610 did, vport->port_state, ndlp->nlp_flag);
10611
10612 phba->fc_stat.elsRcvPDISC++;
10613 if (vport->port_state < LPFC_DISC_AUTH) {
10614 rjt_err = LSRJT_UNABLE_TPC;
10615 rjt_exp = LSEXP_NOTHING_MORE;
10616 break;
10617 }
10618 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10619 NLP_EVT_RCV_PDISC);
10620 break;
10621 case ELS_CMD_FARPR:
10622 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10623 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
10624 did, vport->port_state, ndlp->nlp_flag);
10625
10626 phba->fc_stat.elsRcvFARPR++;
10627 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
10628 break;
10629 case ELS_CMD_FARP:
10630 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10631 "RCV FARP: did:x%x/ste:x%x flg:x%x",
10632 did, vport->port_state, ndlp->nlp_flag);
10633
10634 phba->fc_stat.elsRcvFARP++;
10635 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
10636 break;
10637 case ELS_CMD_FAN:
10638 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10639 "RCV FAN: did:x%x/ste:x%x flg:x%x",
10640 did, vport->port_state, ndlp->nlp_flag);
10641
10642 phba->fc_stat.elsRcvFAN++;
10643 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
10644 break;
10645 case ELS_CMD_PRLI:
10646 case ELS_CMD_NVMEPRLI:
10647 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10648 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
10649 did, vport->port_state, ndlp->nlp_flag);
10650
10651 phba->fc_stat.elsRcvPRLI++;
10652 if ((vport->port_state < LPFC_DISC_AUTH) &&
10653 (vport->fc_flag & FC_FABRIC)) {
10654 rjt_err = LSRJT_UNABLE_TPC;
10655 rjt_exp = LSEXP_NOTHING_MORE;
10656 break;
10657 }
10658 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
10659 break;
10660 case ELS_CMD_LIRR:
10661 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10662 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
10663 did, vport->port_state, ndlp->nlp_flag);
10664
10665 phba->fc_stat.elsRcvLIRR++;
10666 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
10667 if (newnode)
10668 lpfc_disc_state_machine(vport, ndlp, NULL,
10669 NLP_EVT_DEVICE_RM);
10670 break;
10671 case ELS_CMD_RLS:
10672 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10673 "RCV RLS: did:x%x/ste:x%x flg:x%x",
10674 did, vport->port_state, ndlp->nlp_flag);
10675
10676 phba->fc_stat.elsRcvRLS++;
10677 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
10678 if (newnode)
10679 lpfc_disc_state_machine(vport, ndlp, NULL,
10680 NLP_EVT_DEVICE_RM);
10681 break;
10682 case ELS_CMD_RPL:
10683 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10684 "RCV RPL: did:x%x/ste:x%x flg:x%x",
10685 did, vport->port_state, ndlp->nlp_flag);
10686
10687 phba->fc_stat.elsRcvRPL++;
10688 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
10689 if (newnode)
10690 lpfc_disc_state_machine(vport, ndlp, NULL,
10691 NLP_EVT_DEVICE_RM);
10692 break;
10693 case ELS_CMD_RNID:
10694 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10695 "RCV RNID: did:x%x/ste:x%x flg:x%x",
10696 did, vport->port_state, ndlp->nlp_flag);
10697
10698 phba->fc_stat.elsRcvRNID++;
10699 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
10700 if (newnode)
10701 lpfc_disc_state_machine(vport, ndlp, NULL,
10702 NLP_EVT_DEVICE_RM);
10703 break;
10704 case ELS_CMD_RTV:
10705 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10706 "RCV RTV: did:x%x/ste:x%x flg:x%x",
10707 did, vport->port_state, ndlp->nlp_flag);
10708 phba->fc_stat.elsRcvRTV++;
10709 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
10710 if (newnode)
10711 lpfc_disc_state_machine(vport, ndlp, NULL,
10712 NLP_EVT_DEVICE_RM);
10713 break;
10714 case ELS_CMD_RRQ:
10715 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10716 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
10717 did, vport->port_state, ndlp->nlp_flag);
10718
10719 phba->fc_stat.elsRcvRRQ++;
10720 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
10721 if (newnode)
10722 lpfc_disc_state_machine(vport, ndlp, NULL,
10723 NLP_EVT_DEVICE_RM);
10724 break;
10725 case ELS_CMD_ECHO:
10726 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10727 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
10728 did, vport->port_state, ndlp->nlp_flag);
10729
10730 phba->fc_stat.elsRcvECHO++;
10731 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
10732 if (newnode)
10733 lpfc_disc_state_machine(vport, ndlp, NULL,
10734 NLP_EVT_DEVICE_RM);
10735 break;
10736 case ELS_CMD_REC:
10737 		/* Received because the exchange was closed */
10738 rjt_err = LSRJT_UNABLE_TPC;
10739 rjt_exp = LSEXP_INVALID_OX_RX;
10740 break;
10741 case ELS_CMD_FPIN:
10742 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10743 "RCV FPIN: did:x%x/ste:x%x flg:x%x",
10744 did, vport->port_state, ndlp->nlp_flag);
10745
10746 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
10747 payload_len);
10748
10749 /* There are no replies, so no rjt codes */
10750 break;
10751 case ELS_CMD_EDC:
10752 lpfc_els_rcv_edc(vport, elsiocb, ndlp);
10753 break;
10754 case ELS_CMD_RDF:
10755 phba->fc_stat.elsRcvRDF++;
10756 /* Accept RDF only from fabric controller */
10757 if (did != Fabric_Cntl_DID) {
10758 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
10759 "1115 Received RDF from invalid DID "
10760 "x%x\n", did);
10761 rjt_err = LSRJT_PROTOCOL_ERR;
10762 rjt_exp = LSEXP_NOTHING_MORE;
10763 goto lsrjt;
10764 }
10765
10766 lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
10767 break;
10768 default:
10769 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10770 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
10771 cmd, did, vport->port_state);
10772
10773 /* Unsupported ELS command, reject */
10774 rjt_err = LSRJT_CMD_UNSUPPORTED;
10775 rjt_exp = LSEXP_NOTHING_MORE;
10776
10777 /* Unknown ELS command <elsCmd> received from NPORT <did> */
10778 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10779 "0115 Unknown ELS command x%x "
10780 "received from NPORT x%x\n", cmd, did);
10781 if (newnode)
10782 lpfc_disc_state_machine(vport, ndlp, NULL,
10783 NLP_EVT_DEVICE_RM);
10784 break;
10785 }
10786
10787 lsrjt:
10788 	/* Check if we need to send an LS_RJT for the received ELS cmd */
10789 if (rjt_err) {
10790 memset(&stat, 0, sizeof(stat));
10791 stat.un.b.lsRjtRsnCode = rjt_err;
10792 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
10793 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
10794 NULL);
10795 /* Remove the reference from above for new nodes. */
10796 if (newnode)
10797 lpfc_disc_state_machine(vport, ndlp, NULL,
10798 NLP_EVT_DEVICE_RM);
10799 }
10800
10801 /* Release the reference on this elsiocb, not the ndlp. */
10802 lpfc_nlp_put(elsiocb->ndlp);
10803 elsiocb->ndlp = NULL;
10804
10805 	/* Special case. The driver received an unsolicited command that is
10806 	 * unsupportable given the driver's current state. Reset the
10807 * link and start over.
10808 */
10809 if (init_link) {
10810 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10811 if (!mbox)
10812 return;
10813 lpfc_linkdown(phba);
10814 lpfc_init_link(phba, mbox,
10815 phba->cfg_topology,
10816 phba->cfg_link_speed);
10817 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
10818 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10819 mbox->vport = vport;
10820 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
10821 MBX_NOT_FINISHED)
10822 mempool_free(mbox, phba->mbox_mem_pool);
10823 }
10824
10825 return;
10826
10827 dropit:
10828 if (vport && !(vport->load_flag & FC_UNLOADING))
10829 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10830 "0111 Dropping received ELS cmd "
10831 "Data: x%x x%x x%x x%x\n",
10832 cmd, status, get_job_word4(phba, elsiocb), did);
10833
10834 phba->fc_stat.elsRcvDrop++;
10835 }
10836
10837 /**
10838 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
10839 * @phba: pointer to lpfc hba data structure.
10840 * @pring: pointer to a SLI ring.
10841 * @elsiocb: pointer to lpfc els iocb data structure.
10842 *
10843 * This routine is used to process an unsolicited event received from a SLI
10844 * (Service Level Interface) ring. The actual processing of the data buffer
10845 * associated with the unsolicited event is done by invoking the routine
10846 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
10847 * SLI ring on which the unsolicited event was received.
10848 **/
10849 void
10850 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10851 struct lpfc_iocbq *elsiocb)
10852 {
10853 struct lpfc_vport *vport = elsiocb->vport;
10854 u32 ulp_command, status, parameter, bde_count = 0;
10855 IOCB_t *icmd;
10856 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10857 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
10858 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
10859 dma_addr_t paddr;
10860
10861 elsiocb->cmd_dmabuf = NULL;
10862 elsiocb->rsp_dmabuf = NULL;
10863 elsiocb->bpl_dmabuf = NULL;
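	/* Detach the receive buffers from the iocb; they are reattached
	 * below based on the BDE count and freed once the unsolicited
	 * handler has consumed them.
	 */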
10864
10865 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10866 ulp_command = get_job_cmnd(phba, elsiocb);
10867 status = get_job_ulpstatus(phba, elsiocb);
10868 parameter = get_job_word4(phba, elsiocb);
10869 if (phba->sli_rev == LPFC_SLI_REV4)
10870 bde_count = wcqe_cmpl->word3;
10871 else
10872 bde_count = elsiocb->iocb.ulpBdeCount;
10873
10874 if (status == IOSTAT_NEED_BUFFER) {
10875 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
10876 } else if (status == IOSTAT_LOCAL_REJECT &&
10877 (parameter & IOERR_PARAM_MASK) ==
10878 IOERR_RCV_BUFFER_WAITING) {
10879 phba->fc_stat.NoRcvBuf++;
10880 /* Not enough posted buffers; Try posting more buffers */
10881 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
10882 lpfc_sli3_post_buffer(phba, pring, 0);
10883 return;
10884 }
10885
10886 if (phba->sli_rev == LPFC_SLI_REV3) {
10887 icmd = &elsiocb->iocb;
10888 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
10889 (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
10890 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
10891 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
10892 vport = phba->pport;
10893 else
10894 vport = lpfc_find_vport_by_vpid(phba,
10895 icmd->unsli3.rcvsli3.vpi);
10896 }
10897 }
10898
10899 /* If there are no BDEs associated
10900 * with this IOCB, there is nothing to do.
10901 */
10902 if (bde_count == 0)
10903 return;
10904
10905 /* Account for SLI2 or SLI3 and later unsolicited buffering */
10906 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
10907 elsiocb->cmd_dmabuf = bdeBuf1;
10908 if (bde_count == 2)
10909 elsiocb->bpl_dmabuf = bdeBuf2;
10910 } else {
10911 icmd = &elsiocb->iocb;
10912 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
10913 icmd->un.cont64[0].addrLow);
10914 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
10915 paddr);
10916 if (bde_count == 2) {
10917 paddr = getPaddr(icmd->un.cont64[1].addrHigh,
10918 icmd->un.cont64[1].addrLow);
10919 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
10920 pring,
10921 paddr);
10922 }
10923 }
10924
10925 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
10926 /*
10927 * The different unsolicited event handlers would tell us
10928 * if they are done with "mp" by setting cmd_dmabuf to NULL.
10929 */
10930 if (elsiocb->cmd_dmabuf) {
10931 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
10932 elsiocb->cmd_dmabuf = NULL;
10933 }
10934
10935 if (elsiocb->bpl_dmabuf) {
10936 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
10937 elsiocb->bpl_dmabuf = NULL;
10938 }
10939
10940 }
10941
10942 static void
10943 lpfc_start_fdmi(struct lpfc_vport *vport)
10944 {
10945 struct lpfc_nodelist *ndlp;
10946
10947 /* If this is the first time, allocate an ndlp and initialize
10948 * it. Otherwise, make sure the node is enabled and then do the
10949 * login.
10950 */
10951 ndlp = lpfc_findnode_did(vport, FDMI_DID);
10952 if (!ndlp) {
10953 ndlp = lpfc_nlp_init(vport, FDMI_DID);
10954 if (ndlp) {
10955 ndlp->nlp_type |= NLP_FABRIC;
10956 } else {
10957 return;
10958 }
10959 }
10960
10961 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10962 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10963 }
10964
10965 /**
10966 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10967 * @phba: pointer to lpfc hba data structure.
10968 * @vport: pointer to a virtual N_Port data structure.
10969 *
10970 * This routine issues a Port Login (PLOGI) to the Name Server with
10971 * State Change Request (SCR) for a @vport. This routine will create an
10972 * ndlp for the Name Server associated to the @vport if such node does
10973 * not already exist. The PLOGI to Name Server is issued by invoking the
10974 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
10975 * (FDMI) is configured for the @vport, an FDMI node will be created and
10976 * the PLOGI to FDMI will be issued by invoking the lpfc_issue_els_plogi() routine.
10977 **/
10978 void
10979 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10980 {
10981 struct lpfc_nodelist *ndlp;
10982 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10983
10984 /*
10985 	 * If the lpfc_delay_discovery parameter is set, the clean address
10986 	 * bit is cleared, and the FC fabric parameters have changed, delay FC NPort
10987 * discovery.
10988 */
10989 spin_lock_irq(shost->host_lock);
10990 if (vport->fc_flag & FC_DISC_DELAYED) {
10991 spin_unlock_irq(shost->host_lock);
10992 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10993 "3334 Delay fc port discovery for %d secs\n",
10994 phba->fc_ratov);
10995 mod_timer(&vport->delayed_disc_tmo,
10996 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
10997 return;
10998 }
10999 spin_unlock_irq(shost->host_lock);
11000
11001 ndlp = lpfc_findnode_did(vport, NameServer_DID);
11002 if (!ndlp) {
11003 ndlp = lpfc_nlp_init(vport, NameServer_DID);
11004 if (!ndlp) {
11005 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
11006 lpfc_disc_start(vport);
11007 return;
11008 }
11009 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11010 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11011 "0251 NameServer login: no memory\n");
11012 return;
11013 }
11014 }
11015
11016 ndlp->nlp_type |= NLP_FABRIC;
11017
11018 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
11019
11020 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
11021 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11022 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11023 "0252 Cannot issue NameServer login\n");
11024 return;
11025 }
11026
11027 if ((phba->cfg_enable_SmartSAN ||
11028 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
11029 (vport->load_flag & FC_ALLOW_FDMI))
11030 lpfc_start_fdmi(vport);
11031 }
11032
11033 /**
11034 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
11035 * @phba: pointer to lpfc hba data structure.
11036 * @pmb: pointer to the driver internal queue element for mailbox command.
11037 *
11038 * This routine is the completion callback function to register new vport
11039 * mailbox command. If the new vport mailbox command completes successfully,
11040 * the fabric registration login shall be performed on physical port (the
11041 * new vport created is actually a physical port, with VPI 0) or the port
11042 * login to Name Server for State Change Request (SCR) will be performed
11043 * on virtual port (real virtual port, with VPI greater than 0).
11044 **/
11045 static void
11046 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
11047 {
11048 struct lpfc_vport *vport = pmb->vport;
11049 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11050 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
11051 MAILBOX_t *mb = &pmb->u.mb;
11052 int rc;
11053
11054 spin_lock_irq(shost->host_lock);
11055 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
11056 spin_unlock_irq(shost->host_lock);
11057
11058 if (mb->mbxStatus) {
11059 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11060 "0915 Register VPI failed : Status: x%x"
11061 " upd bit: x%x \n", mb->mbxStatus,
11062 mb->un.varRegVpi.upd);
11063 if (phba->sli_rev == LPFC_SLI_REV4 &&
11064 mb->un.varRegVpi.upd)
11065 goto mbox_err_exit ;
11066
11067 switch (mb->mbxStatus) {
11068 case 0x11: /* unsupported feature */
11069 case 0x9603: /* max_vpi exceeded */
11070 case 0x9602: /* Link event since CLEAR_LA */
11071 /* giving up on vport registration */
11072 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11073 spin_lock_irq(shost->host_lock);
11074 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
11075 spin_unlock_irq(shost->host_lock);
11076 lpfc_can_disctmo(vport);
11077 break;
11078 		/* If reg_vpi fails with invalid VPI status, re-init the VPI */
11079 case 0x20:
11080 spin_lock_irq(shost->host_lock);
11081 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
11082 spin_unlock_irq(shost->host_lock);
11083 lpfc_init_vpi(phba, pmb, vport->vpi);
11084 pmb->vport = vport;
11085 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
11086 rc = lpfc_sli_issue_mbox(phba, pmb,
11087 MBX_NOWAIT);
11088 if (rc == MBX_NOT_FINISHED) {
11089 lpfc_printf_vlog(vport, KERN_ERR,
11090 LOG_TRACE_EVENT,
11091 "2732 Failed to issue INIT_VPI"
11092 " mailbox command\n");
11093 } else {
11094 lpfc_nlp_put(ndlp);
11095 return;
11096 }
11097 fallthrough;
11098 default:
11099 /* Try to recover from this error */
11100 if (phba->sli_rev == LPFC_SLI_REV4)
11101 lpfc_sli4_unreg_all_rpis(vport);
11102 lpfc_mbx_unreg_vpi(vport);
11103 spin_lock_irq(shost->host_lock);
11104 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
11105 spin_unlock_irq(shost->host_lock);
11106 if (mb->mbxStatus == MBX_NOT_FINISHED)
11107 break;
11108 if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
11109 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
11110 if (phba->sli_rev == LPFC_SLI_REV4)
11111 lpfc_issue_init_vfi(vport);
11112 else
11113 lpfc_initial_flogi(vport);
11114 } else {
11115 lpfc_initial_fdisc(vport);
11116 }
11117 break;
11118 }
11119 } else {
11120 spin_lock_irq(shost->host_lock);
11121 vport->vpi_state |= LPFC_VPI_REGISTERED;
11122 spin_unlock_irq(shost->host_lock);
11123 if (vport == phba->pport) {
11124 if (phba->sli_rev < LPFC_SLI_REV4)
11125 lpfc_issue_fabric_reglogin(vport);
11126 else {
11127 /*
11128 * If the physical port is instantiated using
11129 * FDISC, do not start vport discovery.
11130 */
11131 if (vport->port_state != LPFC_FDISC)
11132 lpfc_start_fdiscs(phba);
11133 lpfc_do_scr_ns_plogi(phba, vport);
11134 }
11135 } else {
11136 lpfc_do_scr_ns_plogi(phba, vport);
11137 }
11138 }
11139 mbox_err_exit:
11140 /* Now, we decrement the ndlp reference count held for this
11141 * callback function
11142 */
11143 lpfc_nlp_put(ndlp);
11144
11145 mempool_free(pmb, phba->mbox_mem_pool);
11146
11147 	/* Reinitialize the VMID data structure before returning.
11148 	 * This is specifically for vports.
11149 */
11150 if (lpfc_is_vmid_enabled(phba))
11151 lpfc_reinit_vmid(vport);
11152 vport->vmid_flag = vport->phba->pport->vmid_flag;
11153
11154 return;
11155 }
11156
11157 /**
11158 * lpfc_register_new_vport - Register a new vport with a HBA
11159 * @phba: pointer to lpfc hba data structure.
11160 * @vport: pointer to a host virtual N_Port data structure.
11161 * @ndlp: pointer to a node-list data structure.
11162 *
11163 * This routine registers the @vport as a new virtual port with a HBA.
11164 * It is done through a registering vpi mailbox command.
11165 **/
11166 void
11167 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
11168 struct lpfc_nodelist *ndlp)
11169 {
11170 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11171 LPFC_MBOXQ_t *mbox;
11172
11173 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11174 if (mbox) {
11175 lpfc_reg_vpi(vport, mbox);
11176 mbox->vport = vport;
11177 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
11178 if (!mbox->ctx_ndlp) {
11179 mempool_free(mbox, phba->mbox_mem_pool);
11180 goto mbox_err_exit;
11181 }
11182
11183 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
11184 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
11185 == MBX_NOT_FINISHED) {
11186 			/* The mailbox command was not successful; decrement the
11187 			 * ndlp reference count taken for this command
11188 */
11189 lpfc_nlp_put(ndlp);
11190 mempool_free(mbox, phba->mbox_mem_pool);
11191
11192 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11193 "0253 Register VPI: Can't send mbox\n");
11194 goto mbox_err_exit;
11195 }
11196 } else {
11197 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11198 "0254 Register VPI: no memory\n");
11199 goto mbox_err_exit;
11200 }
11201 return;
11202
11203 mbox_err_exit:
11204 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11205 spin_lock_irq(shost->host_lock);
11206 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
11207 spin_unlock_irq(shost->host_lock);
11208 return;
11209 }
11210
11211 /**
11212 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
11213 * @phba: pointer to lpfc hba data structure.
11214 *
11215 * This routine cancels the retry delay timers for all the vports.
11216 **/
11217 void
11218 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
11219 {
11220 struct lpfc_vport **vports;
11221 struct lpfc_nodelist *ndlp;
11222 uint32_t link_state;
11223 int i;
11224
11225 /* Treat this failure as linkdown for all vports */
11226 link_state = phba->link_state;
11227 lpfc_linkdown(phba);
11228 phba->link_state = link_state;
11229
11230 vports = lpfc_create_vport_work_array(phba);
11231
11232 if (vports) {
11233 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11234 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
11235 if (ndlp)
11236 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
11237 lpfc_els_flush_cmd(vports[i]);
11238 }
11239 lpfc_destroy_vport_work_array(phba, vports);
11240 }
11241 }
11242
11243 /**
11244 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
11245 * @phba: pointer to lpfc hba data structure.
11246 *
11247 * This routine aborts all pending discovery commands and
11248 * starts a timer to retry FLOGI for the physical port
11249 * discovery.
11250 **/
11251 void
11252 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
11253 {
11254 struct lpfc_nodelist *ndlp;
11255
11256 	/* Cancel all vports' retry delay timers */
11257 lpfc_cancel_all_vport_retry_delay_timer(phba);
11258
11259 	/* If the fabric requires FLOGI, re-instantiate the physical login */
11260 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
11261 if (!ndlp)
11262 return;
11263
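	/* Arm the node's delay timer for one second; when it fires, the
	 * driver's delayed-retry handling reissues the ELS command saved in
	 * nlp_last_elscmd (FLOGI here).
	 */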
11264 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
11265 spin_lock_irq(&ndlp->lock);
11266 ndlp->nlp_flag |= NLP_DELAY_TMO;
11267 spin_unlock_irq(&ndlp->lock);
11268 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
11269 phba->pport->port_state = LPFC_FLOGI;
11270 return;
11271 }
11272
11273 /**
11274 * lpfc_fabric_login_reqd - Check if FLOGI required.
11275 * @phba: pointer to lpfc hba data structure.
11276 * @cmdiocb: pointer to FDISC command iocb.
11277 * @rspiocb: pointer to FDISC response iocb.
11278 *
11279 * This routine checks if a FLOGI is required for FDISC
11280 * to succeed.
11281 **/
11282 static int
11283 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
11284 struct lpfc_iocbq *cmdiocb,
11285 struct lpfc_iocbq *rspiocb)
11286 {
11287 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11288 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11289
11290 if (ulp_status != IOSTAT_FABRIC_RJT ||
11291 ulp_word4 != RJT_LOGIN_REQUIRED)
11292 return 0;
11293 else
11294 return 1;
11295 }
11296
11297 /**
11298 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11299 * @phba: pointer to lpfc hba data structure.
11300 * @cmdiocb: pointer to lpfc command iocb data structure.
11301 * @rspiocb: pointer to lpfc response iocb data structure.
11302 *
11303 * This routine is the completion callback function to a Fabric Discover
11304 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11305 * single threaded, each FDISC completion callback function resets the
11306 * discovery timer for all vports so that the timers do not time out
11307 * unnecessarily. The function checks the FDISC IOCB status. If an error is
11308 * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the
11309 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID
11310 * assigned to the vport has been changed with the completion of the FDISC
11311 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11312 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11313 * routine is invoked to register new vport with the HBA. Otherwise, the
11314 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11315 * Server for State Change Request (SCR).
11316 **/
11317 static void
11318 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11319 struct lpfc_iocbq *rspiocb)
11320 {
11321 struct lpfc_vport *vport = cmdiocb->vport;
11322 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11323 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11324 struct lpfc_nodelist *np;
11325 struct lpfc_nodelist *next_np;
11326 struct lpfc_iocbq *piocb;
11327 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11328 struct serv_parm *sp;
11329 uint8_t fabric_param_changed;
11330 u32 ulp_status, ulp_word4;
11331
11332 ulp_status = get_job_ulpstatus(phba, rspiocb);
11333 ulp_word4 = get_job_word4(phba, rspiocb);
11334
11335 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11336 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
11337 ulp_status, ulp_word4,
11338 vport->fc_prevDID);
11339 /* Since all FDISCs are being single threaded, we
11340 * must reset the discovery timer for ALL vports
11341 * waiting to send FDISC when one completes.
11342 */
11343 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
11344 lpfc_set_disctmo(piocb->vport);
11345 }
11346
11347 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11348 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
11349 ulp_status, ulp_word4, vport->fc_prevDID);
11350
11351 if (ulp_status) {
11352
11353 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
11354 lpfc_retry_pport_discovery(phba);
11355 goto out;
11356 }
11357
11358 /* Check for retry */
11359 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
11360 goto out;
11361 /* FDISC failed */
11362 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11363 "0126 FDISC failed. (x%x/x%x)\n",
11364 ulp_status, ulp_word4);
11365 goto fdisc_failed;
11366 }
11367
11368 lpfc_check_nlp_post_devloss(vport, ndlp);
11369
11370 spin_lock_irq(shost->host_lock);
11371 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
11372 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
11373 vport->fc_flag |= FC_FABRIC;
11374 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
11375 vport->fc_flag |= FC_PUBLIC_LOOP;
11376 spin_unlock_irq(shost->host_lock);
11377
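	/* Word4 of a successful FDISC response carries the N_Port ID the
	 * fabric assigned to this vport; keep only the 24-bit DID.
	 */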
11378 vport->fc_myDID = ulp_word4 & Mask_DID;
11379 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
11380 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
11381 if (!prsp)
11382 goto out;
11383 if (!lpfc_is_els_acc_rsp(prsp))
11384 goto out;
11385
11386 sp = prsp->virt + sizeof(uint32_t);
11387 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
11388 memcpy(&vport->fabric_portname, &sp->portName,
11389 sizeof(struct lpfc_name));
11390 memcpy(&vport->fabric_nodename, &sp->nodeName,
11391 sizeof(struct lpfc_name));
11392 if (fabric_param_changed &&
11393 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
11394 /* If our NportID changed, we need to ensure all
11395 * remaining NPORTs get unreg_login'ed so we can
11396 * issue unreg_vpi.
11397 */
11398 list_for_each_entry_safe(np, next_np,
11399 &vport->fc_nodes, nlp_listp) {
11400 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
11401 !(np->nlp_flag & NLP_NPR_ADISC))
11402 continue;
11403 spin_lock_irq(&ndlp->lock);
11404 np->nlp_flag &= ~NLP_NPR_ADISC;
11405 spin_unlock_irq(&ndlp->lock);
11406 lpfc_unreg_rpi(vport, np);
11407 }
11408 lpfc_cleanup_pending_mbox(vport);
11409
11410 if (phba->sli_rev == LPFC_SLI_REV4)
11411 lpfc_sli4_unreg_all_rpis(vport);
11412
11413 lpfc_mbx_unreg_vpi(vport);
11414 spin_lock_irq(shost->host_lock);
11415 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
11416 if (phba->sli_rev == LPFC_SLI_REV4)
11417 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
11418 else
11419 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
11420 spin_unlock_irq(shost->host_lock);
11421 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
11422 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
11423 /*
11424 * Driver needs to re-reg VPI in order for f/w
11425 * to update the MAC address.
11426 */
11427 lpfc_register_new_vport(phba, vport, ndlp);
11428 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11429 goto out;
11430 }
11431
11432 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
11433 lpfc_issue_init_vpi(vport);
11434 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
11435 lpfc_register_new_vport(phba, vport, ndlp);
11436 else
11437 lpfc_do_scr_ns_plogi(phba, vport);
11438
11439 /* The FDISC completed successfully. Move the fabric ndlp to
11440 * UNMAPPED state and register with the transport.
11441 */
11442 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11443 goto out;
11444
11445 fdisc_failed:
11446 if (vport->fc_vport &&
11447 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
11448 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11449 /* Cancel discovery timer */
11450 lpfc_can_disctmo(vport);
11451 out:
11452 lpfc_els_free_iocb(phba, cmdiocb);
11453 lpfc_nlp_put(ndlp);
11454 }
11455
11456 /**
11457 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
11458 * @vport: pointer to a virtual N_Port data structure.
11459 * @ndlp: pointer to a node-list data structure.
11460 * @retry: number of retries to the command IOCB.
11461 *
11462 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
11463 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
11464 * routine to issue the IOCB, which makes sure only one outstanding fabric
11465 * IOCB will be sent off the HBA at any given time.
11466 *
11467 * Note that the ndlp reference count will be incremented by 1 for holding the
11468 * ndlp and the reference to ndlp will be stored into the ndlp field of
11469 * the IOCB for the completion callback function to the FDISC ELS command.
11470 *
11471 * Return code
11472 * 0 - Successfully issued fdisc iocb command
11473 * 1 - Failed to issue fdisc iocb command
11474 **/
11475 static int
11476 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
11477 uint8_t retry)
11478 {
11479 struct lpfc_hba *phba = vport->phba;
11480 IOCB_t *icmd;
11481 union lpfc_wqe128 *wqe = NULL;
11482 struct lpfc_iocbq *elsiocb;
11483 struct serv_parm *sp;
11484 uint8_t *pcmd;
11485 uint16_t cmdsize;
11486 int did = ndlp->nlp_DID;
11487 int rc;
11488
11489 vport->port_state = LPFC_FDISC;
11490 vport->fc_myDID = 0;
11491 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
11492 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
11493 ELS_CMD_FDISC);
11494 if (!elsiocb) {
11495 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11496 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11497 "0255 Issue FDISC: no IOCB\n");
11498 return 1;
11499 }
11500
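	/* An FDISC is transmitted with a source ID of 0; the fabric assigns
	 * the new N_Port ID in its accept. SLI4 conveys this through the
	 * els_req64 sid/sp WQE fields, while the SLI3 path sets the
	 * corresponding myID/fl and Ct bits in the IOCB.
	 */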
11501 if (phba->sli_rev == LPFC_SLI_REV4) {
11502 wqe = &elsiocb->wqe;
11503 bf_set(els_req64_sid, &wqe->els_req, 0);
11504 bf_set(els_req64_sp, &wqe->els_req, 1);
11505 } else {
11506 icmd = &elsiocb->iocb;
11507 icmd->un.elsreq64.myID = 0;
11508 icmd->un.elsreq64.fl = 1;
11509 icmd->ulpCt_h = 1;
11510 icmd->ulpCt_l = 0;
11511 }
11512
11513 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11514 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
11515 pcmd += sizeof(uint32_t); /* CSP Word 1 */
11516 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
11517 sp = (struct serv_parm *) pcmd;
11518 /* Setup CSPs accordingly for Fabric */
11519 sp->cmn.e_d_tov = 0;
11520 sp->cmn.w2.r_a_tov = 0;
11521 sp->cmn.virtual_fabric_support = 0;
11522 sp->cls1.classValid = 0;
11523 sp->cls2.seqDelivery = 1;
11524 sp->cls3.seqDelivery = 1;
11525
11526 pcmd += sizeof(uint32_t); /* CSP Word 2 */
11527 pcmd += sizeof(uint32_t); /* CSP Word 3 */
11528 pcmd += sizeof(uint32_t); /* CSP Word 4 */
11529 pcmd += sizeof(uint32_t); /* Port Name */
11530 memcpy(pcmd, &vport->fc_portname, 8);
11531 pcmd += sizeof(uint32_t); /* Node Name */
11532 pcmd += sizeof(uint32_t); /* Node Name */
11533 memcpy(pcmd, &vport->fc_nodename, 8);
11534 sp->cmn.valid_vendor_ver_level = 0;
11535 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
11536 lpfc_set_disctmo(vport);
11537
11538 phba->fc_stat.elsXmitFDISC++;
11539 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
11540
11541 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11542 "Issue FDISC: did:x%x",
11543 did, 0, 0);
11544
11545 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11546 if (!elsiocb->ndlp)
11547 goto err_out;
11548
11549 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
11550 if (rc == IOCB_ERROR) {
11551 lpfc_nlp_put(ndlp);
11552 goto err_out;
11553 }
11554
11555 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
11556 return 0;
11557
11558 err_out:
11559 lpfc_els_free_iocb(phba, elsiocb);
11560 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11561 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11562 "0256 Issue FDISC: Cannot send IOCB\n");
11563 return 1;
11564 }
11565
11566 /**
11567 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
11568 * @phba: pointer to lpfc hba data structure.
11569 * @cmdiocb: pointer to lpfc command iocb data structure.
11570 * @rspiocb: pointer to lpfc response iocb data structure.
11571 *
11572 * This routine is the completion callback function to the issuing of a LOGO
11573 * ELS command off a vport. It frees the command IOCB and then decrements the
11574 * reference count held on ndlp for this completion function, indicating that
11575 * the reference to the ndlp is no longer needed. Note that the
11576 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
11577 * callback function and an additional explicit ndlp reference decrement
11578 * will trigger the actual release of the ndlp.
11579 **/
11580 static void
11581 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11582 struct lpfc_iocbq *rspiocb)
11583 {
11584 struct lpfc_vport *vport = cmdiocb->vport;
11585 IOCB_t *irsp;
11586 struct lpfc_nodelist *ndlp;
11587 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11588 u32 ulp_status, ulp_word4, did, tmo;
11589
11590 ndlp = cmdiocb->ndlp;
11591
11592 ulp_status = get_job_ulpstatus(phba, rspiocb);
11593 ulp_word4 = get_job_word4(phba, rspiocb);
11594
11595 if (phba->sli_rev == LPFC_SLI_REV4) {
11596 did = get_job_els_rsp64_did(phba, cmdiocb);
11597 tmo = get_wqe_tmo(cmdiocb);
11598 } else {
11599 irsp = &rspiocb->iocb;
11600 did = get_job_els_rsp64_did(phba, rspiocb);
11601 tmo = irsp->ulpTimeout;
11602 }
11603
11604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11605 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11606 ulp_status, ulp_word4, did);
11607
11608 /* NPIV LOGO completes to NPort <nlp_DID> */
11609 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11610 "2928 NPIV LOGO completes to NPort x%x "
11611 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11612 ndlp->nlp_DID, ulp_status, ulp_word4,
11613 tmo, vport->num_disc_nodes,
11614 kref_read(&ndlp->kref), ndlp->nlp_flag,
11615 ndlp->fc4_xpt_flags);
11616
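	/* A successful NPIV LOGO means this vport is no longer logged into
	 * the fabric; clear the discovery and fabric flags and cancel the
	 * discovery timer.
	 */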
11617 if (ulp_status == IOSTAT_SUCCESS) {
11618 spin_lock_irq(shost->host_lock);
11619 vport->fc_flag &= ~FC_NDISC_ACTIVE;
11620 vport->fc_flag &= ~FC_FABRIC;
11621 spin_unlock_irq(shost->host_lock);
11622 lpfc_can_disctmo(vport);
11623 }
11624
11625 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
11626 /* Wake up lpfc_vport_delete if waiting...*/
11627 if (ndlp->logo_waitq)
11628 wake_up(ndlp->logo_waitq);
11629 spin_lock_irq(&ndlp->lock);
11630 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
11631 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
11632 spin_unlock_irq(&ndlp->lock);
11633 }
11634
11635 /* Safe to release resources now. */
11636 lpfc_els_free_iocb(phba, cmdiocb);
11637 lpfc_nlp_put(ndlp);
11638 }
11639
11640 /**
11641 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
11642 * @vport: pointer to a virtual N_Port data structure.
11643 * @ndlp: pointer to a node-list data structure.
11644 *
11645 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
11646 *
11647 * Note that the ndlp reference count will be incremented by 1 for holding the
11648 * ndlp and the reference to ndlp will be stored into the ndlp field of
11649 * the IOCB for the completion callback function to the LOGO ELS command.
11650 *
11651 * Return codes
11652 * 0 - Successfully issued logo off the @vport
11653 * 1 - Failed to issue logo off the @vport
11654 **/
11655 int
11656 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
11657 {
11658 int rc = 0;
11659 struct lpfc_hba *phba = vport->phba;
11660 struct lpfc_iocbq *elsiocb;
11661 uint8_t *pcmd;
11662 uint16_t cmdsize;
11663
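	/* LOGO payload: ELS command word, N_Port ID word, then the WWPN */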
11664 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
11665 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
11666 ELS_CMD_LOGO);
11667 if (!elsiocb)
11668 return 1;
11669
11670 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11671 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
11672 pcmd += sizeof(uint32_t);
11673
11674 /* Fill in LOGO payload */
11675 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
11676 pcmd += sizeof(uint32_t);
11677 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
11678
11679 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11680 "Issue LOGO npiv did:x%x flg:x%x",
11681 ndlp->nlp_DID, ndlp->nlp_flag, 0);
11682
11683 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
11684 spin_lock_irq(&ndlp->lock);
11685 ndlp->nlp_flag |= NLP_LOGO_SND;
11686 spin_unlock_irq(&ndlp->lock);
11687 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11688 if (!elsiocb->ndlp) {
11689 lpfc_els_free_iocb(phba, elsiocb);
11690 goto err;
11691 }
11692
11693 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
11694 if (rc == IOCB_ERROR) {
11695 lpfc_els_free_iocb(phba, elsiocb);
11696 lpfc_nlp_put(ndlp);
11697 goto err;
11698 }
11699 return 0;
11700
11701 err:
11702 spin_lock_irq(&ndlp->lock);
11703 ndlp->nlp_flag &= ~NLP_LOGO_SND;
11704 spin_unlock_irq(&ndlp->lock);
11705 return 1;
11706 }
11707
11708 /**
11709 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
11710 * @t: timer context used to obtain the lpfc hba.
11711 *
11712 * This routine is invoked by the fabric iocb block timer after
11713 * timeout. It posts the fabric iocb block timeout event by setting the
11714 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
11715 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
11716 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the posted
11717 * WORKER_FABRIC_BLOCK_TMO event.
11718 **/
11719 void
11720 lpfc_fabric_block_timeout(struct timer_list *t)
11721 {
11722 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
11723 unsigned long iflags;
11724 uint32_t tmo_posted;
11725
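	/* Post the block-timeout event only once; the worker thread clears it
	 * when it runs lpfc_unblock_fabric_iocbs().
	 */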
11726 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11727 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
11728 if (!tmo_posted)
11729 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
11730 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11731
11732 if (!tmo_posted)
11733 lpfc_worker_wake_up(phba);
11734 return;
11735 }
11736
11737 /**
11738 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
11739 * @phba: pointer to lpfc hba data structure.
11740 *
11741 * This routine issues one fabric iocb from the driver internal list to
11742 * the HBA. It first checks whether it's ready to issue one fabric iocb to
11743 * the HBA (whether there is no outstanding fabric iocb). If so, it shall
11744 * remove one pending fabric iocb from the driver internal list and invokes
11745 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
11746 **/
11747 static void
11748 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
11749 {
11750 struct lpfc_iocbq *iocb;
11751 unsigned long iflags;
11752 int ret;
11753
11754 repeat:
11755 iocb = NULL;
11756 spin_lock_irqsave(&phba->hbalock, iflags);
11757 /* Post any pending iocb to the SLI layer */
11758 if (atomic_read(&phba->fabric_iocb_count) == 0) {
11759 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
11760 list);
11761 if (iocb)
11762 /* Increment fabric iocb count to hold the position */
11763 atomic_inc(&phba->fabric_iocb_count);
11764 }
11765 spin_unlock_irqrestore(&phba->hbalock, iflags);
11766 if (iocb) {
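		/* Save the original completion handler and interpose
		 * lpfc_cmpl_fabric_iocb so the next queued fabric IOCB can be
		 * scheduled when this one completes.
		 */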
11767 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11768 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11769 iocb->cmd_flag |= LPFC_IO_FABRIC;
11770
11771 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11772 "Fabric sched1: ste:x%x",
11773 iocb->vport->port_state, 0, 0);
11774
11775 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11776
11777 if (ret == IOCB_ERROR) {
11778 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11779 iocb->fabric_cmd_cmpl = NULL;
11780 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11781 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
11782 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
11783 iocb->cmd_cmpl(phba, iocb, iocb);
11784
11785 atomic_dec(&phba->fabric_iocb_count);
11786 goto repeat;
11787 }
11788 }
11789 }
11790
11791 /**
11792 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
11793 * @phba: pointer to lpfc hba data structure.
11794 *
11795 * This routine unblocks the issuing fabric iocb command. The function
11796 * will clear the fabric iocb block bit and then invoke the routine
11797 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
11798 * from the driver internal fabric iocb list.
11799 **/
11800 void
11801 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
11802 {
11803 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11804
11805 lpfc_resume_fabric_iocbs(phba);
11806 return;
11807 }
11808
11809 /**
11810 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
11811 * @phba: pointer to lpfc hba data structure.
11812 *
11813 * This routine blocks the issuing of fabric iocbs for a specified amount of
11814 * time (currently 100 ms). This is done by setting the fabric iocb block bit
11815 * and setting up a timeout timer for 100 ms. While the block bit is set, no
11816 * more fabric iocbs will be issued out of the HBA.
11817 **/
11818 static void
11819 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
11820 {
11821 int blocked;
11822
11823 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11824 /* Start a timer to unblock fabric iocbs after 100ms */
11825 if (!blocked)
11826 mod_timer(&phba->fabric_block_timer,
11827 jiffies + msecs_to_jiffies(100));
11828
11829 return;
11830 }
11831
11832 /**
11833 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
11834 * @phba: pointer to lpfc hba data structure.
11835 * @cmdiocb: pointer to lpfc command iocb data structure.
11836 * @rspiocb: pointer to lpfc response iocb data structure.
11837 *
11838 * This routine is the callback function that is put to the fabric iocb's
11839 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
11840 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
11841 * function first restores and invokes the original iocb's callback function
11842 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11843 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11844 **/
11845 static void
11846 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11847 struct lpfc_iocbq *rspiocb)
11848 {
11849 struct ls_rjt stat;
11850 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11851 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11852
11853 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11854
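	/* A busy or temporarily-unavailable reject from the fabric or NPort
	 * means it is momentarily overloaded; block further fabric IOCBs for
	 * a short interval before resuming.
	 */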
11855 switch (ulp_status) {
11856 case IOSTAT_NPORT_RJT:
11857 case IOSTAT_FABRIC_RJT:
11858 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11859 lpfc_block_fabric_iocbs(phba);
11860 break;
11861
11862 case IOSTAT_NPORT_BSY:
11863 case IOSTAT_FABRIC_BSY:
11864 lpfc_block_fabric_iocbs(phba);
11865 break;
11866
11867 case IOSTAT_LS_RJT:
11868 stat.un.ls_rjt_error_be =
11869 cpu_to_be32(ulp_word4);
11870 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11871 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11872 lpfc_block_fabric_iocbs(phba);
11873 break;
11874 }
11875
11876 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11877
11878 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11879 cmdiocb->fabric_cmd_cmpl = NULL;
11880 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11881 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11882
11883 atomic_dec(&phba->fabric_iocb_count);
11884 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11885 /* Post any pending iocbs to HBA */
11886 lpfc_resume_fabric_iocbs(phba);
11887 }
11888 }
11889
11890 /**
11891 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11892 * @phba: pointer to lpfc hba data structure.
11893 * @iocb: pointer to lpfc command iocb data structure.
11894 *
11895 * This routine is used as the top-level API for issuing a fabric iocb command
11896 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11897 * function makes sure that only one fabric bound iocb will be outstanding at
11898 * any given time. As such, this function will first check to see whether there
11899 * is already an outstanding fabric iocb on the wire. If so, it will put the
11900 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11901 * issued later. Otherwise, it will issue the iocb on the wire and update the
11902 * fabric iocb count to indicate that there is one fabric iocb on the wire.
11903 *
11904 * Note, this implementation can potentially send fabric IOCBs out of
11905 * order. The problem is that the construction of the "ready" boolean does
11906 * not include the condition that the internal fabric IOCB list is empty. As
11907 * such, it is possible that a fabric IOCB issued by this routine might "jump"
11908 * ahead of the fabric IOCBs in the internal list.
11909 *
11910 * Return code
11911 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
11912 * IOCB_ERROR - failed to issue fabric iocb
11913 **/
11914 static int
11915 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
11916 {
11917 unsigned long iflags;
11918 int ready;
11919 int ret;
11920
11921 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
11922
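	/* Under hbalock, claim the single outstanding fabric IOCB slot only
	 * if nothing is in flight and fabric issuing is not blocked.
	 */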
11923 spin_lock_irqsave(&phba->hbalock, iflags);
11924 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
11925 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11926
11927 if (ready)
11928 /* Increment fabric iocb count to hold the position */
11929 atomic_inc(&phba->fabric_iocb_count);
11930 spin_unlock_irqrestore(&phba->hbalock, iflags);
11931 if (ready) {
11932 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11933 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11934 iocb->cmd_flag |= LPFC_IO_FABRIC;
11935
11936 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11937 "Fabric sched2: ste:x%x",
11938 iocb->vport->port_state, 0, 0);
11939
11940 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11941
11942 if (ret == IOCB_ERROR) {
11943 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11944 iocb->fabric_cmd_cmpl = NULL;
11945 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11946 atomic_dec(&phba->fabric_iocb_count);
11947 }
11948 } else {
11949 spin_lock_irqsave(&phba->hbalock, iflags);
11950 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
11951 spin_unlock_irqrestore(&phba->hbalock, iflags);
11952 ret = IOCB_SUCCESS;
11953 }
11954 return ret;
11955 }
11956
11957 /**
11958 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
11959 * @vport: pointer to a virtual N_Port data structure.
11960 *
11961 * This routine aborts all the IOCBs associated with a @vport from the
11962 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11963 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11964 * list, removes each IOCB associated with the @vport off the list, sets the
11965 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11966 * associated with the IOCB.
11967 **/
11968 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
11969 {
11970 LIST_HEAD(completions);
11971 struct lpfc_hba *phba = vport->phba;
11972 struct lpfc_iocbq *tmp_iocb, *piocb;
11973
11974 spin_lock_irq(&phba->hbalock);
11975 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11976 list) {
11977
11978 if (piocb->vport != vport)
11979 continue;
11980
11981 list_move_tail(&piocb->list, &completions);
11982 }
11983 spin_unlock_irq(&phba->hbalock);
11984
11985 /* Cancel all the IOCBs from the completions list */
11986 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11987 IOERR_SLI_ABORTED);
11988 }
11989
11990 /**
11991 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
11992 * @ndlp: pointer to a node-list data structure.
11993 *
11994 * This routine aborts all the IOCBs associated with an @ndlp from the
11995 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11996 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11997 * list, removes each IOCB associated with the @ndlp off the list, sets the
11998 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11999 * associated with the IOCB.
12000 **/
12001 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
12002 {
12003 LIST_HEAD(completions);
12004 struct lpfc_hba *phba = ndlp->phba;
12005 struct lpfc_iocbq *tmp_iocb, *piocb;
12006 struct lpfc_sli_ring *pring;
12007
12008 pring = lpfc_phba_elsring(phba);
12009
12010 if (unlikely(!pring))
12011 return;
12012
12013 spin_lock_irq(&phba->hbalock);
12014 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
12015 list) {
12016 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
12017
12018 list_move_tail(&piocb->list, &completions);
12019 }
12020 }
12021 spin_unlock_irq(&phba->hbalock);
12022
12023 /* Cancel all the IOCBs from the completions list */
12024 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12025 IOERR_SLI_ABORTED);
12026 }
12027
12028 /**
12029 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
12030 * @phba: pointer to lpfc hba data structure.
12031 *
12032 * This routine aborts all the IOCBs currently on the driver internal
12033 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
12034 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
12035 * list, removes IOCBs off the list, sets the status field to
12036 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
12037 * the IOCB.
12038 **/
12039 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
12040 {
12041 LIST_HEAD(completions);
12042
12043 spin_lock_irq(&phba->hbalock);
12044 list_splice_init(&phba->fabric_iocb_list, &completions);
12045 spin_unlock_irq(&phba->hbalock);
12046
12047 /* Cancel all the IOCBs from the completions list */
12048 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12049 IOERR_SLI_ABORTED);
12050 }
12051
12052 /**
12053 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
12054 * @vport: pointer to lpfc vport data structure.
12055 *
12056 * This routine is invoked by the vport cleanup for deletions and the cleanup
12057 * for an ndlp on removal.
12058 **/
12059 void
12060 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
12061 {
12062 struct lpfc_hba *phba = vport->phba;
12063 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12064 struct lpfc_nodelist *ndlp = NULL;
12065 unsigned long iflag = 0;
12066
12067 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12068 list_for_each_entry_safe(sglq_entry, sglq_next,
12069 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
12070 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
12071 lpfc_nlp_put(sglq_entry->ndlp);
12072 ndlp = sglq_entry->ndlp;
12073 sglq_entry->ndlp = NULL;
12074
12075 /* If the xri on the abts_els_sgl list is for the Fport
12076 * node and the vport is unloading, the xri aborted wcqe
12077 * likely isn't coming back. Just release the sgl.
12078 */
12079 if ((vport->load_flag & FC_UNLOADING) &&
12080 ndlp->nlp_DID == Fabric_DID) {
12081 list_del(&sglq_entry->list);
12082 sglq_entry->state = SGL_FREED;
12083 list_add_tail(&sglq_entry->list,
12084 &phba->sli4_hba.lpfc_els_sgl_list);
12085 }
12086 }
12087 }
12088 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
12089 return;
12090 }
12091
12092 /**
12093 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
12094 * @phba: pointer to lpfc hba data structure.
12095 * @axri: pointer to the els xri abort wcqe structure.
12096 *
12097 * This routine is invoked by the worker thread to process a SLI4 slow-path
12098 * ELS aborted xri.
12099 **/
12100 void
12101 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
12102 struct sli4_wcqe_xri_aborted *axri)
12103 {
12104 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
12105 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
12106 uint16_t lxri = 0;
12107
12108 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12109 unsigned long iflag = 0;
12110 struct lpfc_nodelist *ndlp;
12111 struct lpfc_sli_ring *pring;
12112
12113 pring = lpfc_phba_elsring(phba);
12114
12115 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12116 list_for_each_entry_safe(sglq_entry, sglq_next,
12117 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
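		/* Matching aborted XRI: return the sglq to the free list and,
		 * if a node is still associated, mark the RRQ active so the
		 * exchange is not reused until the RRQ window expires.
		 */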
12118 if (sglq_entry->sli4_xritag == xri) {
12119 list_del(&sglq_entry->list);
12120 ndlp = sglq_entry->ndlp;
12121 sglq_entry->ndlp = NULL;
12122 list_add_tail(&sglq_entry->list,
12123 &phba->sli4_hba.lpfc_els_sgl_list);
12124 sglq_entry->state = SGL_FREED;
12125 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
12126 iflag);
12127
12128 if (ndlp) {
12129 lpfc_set_rrq_active(phba, ndlp,
12130 sglq_entry->sli4_lxritag,
12131 rxid, 1);
12132 lpfc_nlp_put(ndlp);
12133 }
12134
12135 /* Check if TXQ queue needs to be serviced */
12136 if (pring && !list_empty(&pring->txq))
12137 lpfc_worker_wake_up(phba);
12138 return;
12139 }
12140 }
12141 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
12142 lxri = lpfc_sli4_xri_inrange(phba, xri);
12143 if (lxri == NO_XRI)
12144 return;
12145
12146 spin_lock_irqsave(&phba->hbalock, iflag);
12147 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
12148 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
12149 spin_unlock_irqrestore(&phba->hbalock, iflag);
12150 return;
12151 }
12152 sglq_entry->state = SGL_XRI_ABORTED;
12153 spin_unlock_irqrestore(&phba->hbalock, iflag);
12154 return;
12155 }
12156
12157 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
12158 * @vport: pointer to virtual port object.
12159 * @ndlp: nodelist pointer for the impacted node.
12160 *
12161 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
12162 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
12163 * the driver is required to send a LOGO to the remote node before it
12164 * attempts to recover its login to the remote node.
12165 */
12166 void
12167 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
12168 struct lpfc_nodelist *ndlp)
12169 {
12170 struct Scsi_Host *shost;
12171 struct lpfc_hba *phba;
12172 unsigned long flags = 0;
12173
12174 shost = lpfc_shost_from_vport(vport);
12175 phba = vport->phba;
12176 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
12177 lpfc_printf_log(phba, KERN_INFO,
12178 LOG_SLI, "3093 No rport recovery needed. "
12179 "rport in state 0x%x\n", ndlp->nlp_state);
12180 return;
12181 }
12182 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12183 "3094 Start rport recovery on shost id 0x%x "
12184 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
12185 "flags 0x%x\n",
12186 shost->host_no, ndlp->nlp_DID,
12187 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
12188 ndlp->nlp_flag);
12189 /*
12190 * The rport is not responding. Remove the FCP-2 flag to prevent
12191 * an ADISC in the follow-up recovery code.
12192 */
12193 spin_lock_irqsave(&ndlp->lock, flags);
12194 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
12195 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
12196 spin_unlock_irqrestore(&ndlp->lock, flags);
12197 lpfc_unreg_rpi(vport, ndlp);
12198 }
12199
12200 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
12201 {
12202 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
12203 }
12204
12205 static void
12206 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
12207 {
12208 u32 i;
12209
12210 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
12211 return;
12212
12213 for (i = min; i <= max; i++)
12214 set_bit(i, vport->vmid_priority_range);
12215 }
12216
12217 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
12218 {
12219 set_bit(ctcl_vmid, vport->vmid_priority_range);
12220 }
12221
12222 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
12223 {
12224 u32 i;
12225
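	/* Hand out the lowest CS_CTL priority value still marked available;
	 * 0 indicates the range is exhausted.
	 */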
12226 i = find_first_bit(vport->vmid_priority_range,
12227 LPFC_VMID_MAX_PRIORITY_RANGE);
12228
12229 if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
12230 return 0;
12231
12232 clear_bit(i, vport->vmid_priority_range);
12233 return i;
12234 }
12235
12236 #define MAX_PRIORITY_DESC 255
12237
12238 static void
12239 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12240 struct lpfc_iocbq *rspiocb)
12241 {
12242 struct lpfc_vport *vport = cmdiocb->vport;
12243 struct priority_range_desc *desc;
12244 struct lpfc_dmabuf *prsp = NULL;
12245 struct lpfc_vmid_priority_range *vmid_range = NULL;
12246 u32 *data;
12247 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
12248 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12249 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12250 u8 *pcmd, max_desc;
12251 u32 len, i;
12252 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12253
12254 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
12255 if (!prsp)
12256 goto out;
12257
12258 pcmd = prsp->virt;
12259 data = (u32 *)pcmd;
12260 if (data[0] == ELS_CMD_LS_RJT) {
12261 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12262 "3277 QFPA LS_RJT x%x x%x\n",
12263 data[0], data[1]);
12264 goto out;
12265 }
12266 if (ulp_status) {
12267 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
12268 "6529 QFPA failed with status x%x x%x\n",
12269 ulp_status, ulp_word4);
12270 goto out;
12271 }
12272
12273 if (!vport->qfpa_res) {
12274 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
12275 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
12276 GFP_KERNEL);
12277 if (!vport->qfpa_res)
12278 goto out;
12279 }
12280
12281 len = *((u32 *)(pcmd + 4));
12282 len = be32_to_cpu(len);
12283 memcpy(vport->qfpa_res, pcmd, len + 8);
12284 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
12285
12286 desc = (struct priority_range_desc *)(pcmd + 8);
12287 vmid_range = vport->vmid_priority.vmid_range;
12288 if (!vmid_range) {
12289 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
12290 GFP_KERNEL);
12291 if (!vmid_range) {
12292 kfree(vport->qfpa_res);
12293 goto out;
12294 }
12295 vport->vmid_priority.vmid_range = vmid_range;
12296 }
12297 vport->vmid_priority.num_descriptors = len;
12298
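	/* Each QFPA descriptor covers a span of CS_CTL priority values. The
	 * returned bounds are doubled and nudged by one for odd-only or
	 * even/odd ranges so the bitmap tracks each individual CS_CTL value.
	 */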
12299 for (i = 0; i < len; i++, vmid_range++, desc++) {
12300 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12301 "6539 vmid values low=%d, high=%d, qos=%d, "
12302 "local ve id=%d\n", desc->lo_range,
12303 desc->hi_range, desc->qos_priority,
12304 desc->local_ve_id);
12305
12306 vmid_range->low = desc->lo_range << 1;
12307 if (desc->local_ve_id == QFPA_ODD_ONLY)
12308 vmid_range->low++;
12309 if (desc->qos_priority)
12310 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
12311 vmid_range->qos = desc->qos_priority;
12312
12313 vmid_range->high = desc->hi_range << 1;
12314 if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
12315 (desc->local_ve_id == QFPA_EVEN_ODD))
12316 vmid_range->high++;
12317 }
12318 lpfc_init_cs_ctl_bitmap(vport);
12319 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
12320 lpfc_vmid_set_cs_ctl_range(vport,
12321 vport->vmid_priority.vmid_range[i].low,
12322 vport->vmid_priority.vmid_range[i].high);
12323 }
12324
12325 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
12326 out:
12327 lpfc_els_free_iocb(phba, cmdiocb);
12328 lpfc_nlp_put(ndlp);
12329 }
12330
12331 int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
12332 {
12333 struct lpfc_hba *phba = vport->phba;
12334 struct lpfc_nodelist *ndlp;
12335 struct lpfc_iocbq *elsiocb;
12336 u8 *pcmd;
12337 int ret;
12338
12339 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
12340 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12341 return -ENXIO;
12342
12343 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
12344 ndlp->nlp_DID, ELS_CMD_QFPA);
12345 if (!elsiocb)
12346 return -ENOMEM;
12347
12348 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12349
12350 *((u32 *)(pcmd)) = ELS_CMD_QFPA;
12351 pcmd += 4;
12352
12353 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
12354
12355 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12356 if (!elsiocb->ndlp) {
12357 lpfc_els_free_iocb(vport->phba, elsiocb);
12358 return -ENXIO;
12359 }
12360
12361 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
12362 if (ret != IOCB_SUCCESS) {
12363 lpfc_els_free_iocb(phba, elsiocb);
12364 lpfc_nlp_put(ndlp);
12365 return -EIO;
12366 }
12367 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
12368 return 0;
12369 }
12370
12371 int
12372 lpfc_vmid_uvem(struct lpfc_vport *vport,
12373 struct lpfc_vmid *vmid, bool instantiated)
12374 {
12375 struct lpfc_vem_id_desc *vem_id_desc;
12376 struct lpfc_nodelist *ndlp;
12377 struct lpfc_iocbq *elsiocb;
12378 struct instantiated_ve_desc *inst_desc;
12379 struct lpfc_vmid_context *vmid_context;
12380 u8 *pcmd;
12381 u32 *len;
12382 int ret = 0;
12383
12384 ndlp = lpfc_findnode_did(vport, Fabric_DID);
12385 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12386 return -ENXIO;
12387
12388 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
12389 if (!vmid_context)
12390 return -ENOMEM;
12391 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
12392 ndlp, Fabric_DID, ELS_CMD_UVEM);
12393 if (!elsiocb)
12394 goto out;
12395
12396 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12397 "3427 Host vmid %s %d\n",
12398 vmid->host_vmid, instantiated);
12399 vmid_context->vmp = vmid;
12400 vmid_context->nlp = ndlp;
12401 vmid_context->instantiated = instantiated;
12402 elsiocb->vmid_tag.vmid_context = vmid_context;
12403 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12404
12405 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
12406 sizeof(vport->lpfc_vmid_host_uuid)))
12407 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
12408 sizeof(vport->lpfc_vmid_host_uuid));
12409
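	/* UVEM payload layout: ELS command word, big-endian payload length,
	 * the VEM identification descriptor, then the instantiated (or
	 * deinstantiated) VE descriptor.
	 */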
12410 *((u32 *)(pcmd)) = ELS_CMD_UVEM;
12411 len = (u32 *)(pcmd + 4);
12412 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
12413
12414 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
12415 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
12416 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
12417 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
12418 sizeof(vem_id_desc->vem_id));
12419
12420 inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
12421 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12422 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
12423 memcpy(inst_desc->global_vem_id, vmid->host_vmid,
12424 sizeof(inst_desc->global_vem_id));
12425
12426 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
12427 bf_set(lpfc_instantiated_local_id, inst_desc,
12428 vmid->un.cs_ctl_vmid);
12429 if (instantiated) {
12430 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12431 } else {
12432 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
12433 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
12434 }
12435 inst_desc->word6 = cpu_to_be32(inst_desc->word6);
12436
12437 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
12438
12439 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12440 if (!elsiocb->ndlp) {
12441 lpfc_els_free_iocb(vport->phba, elsiocb);
12442 goto out;
12443 }
12444
12445 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
12446 if (ret != IOCB_SUCCESS) {
12447 lpfc_els_free_iocb(vport->phba, elsiocb);
12448 lpfc_nlp_put(ndlp);
12449 goto out;
12450 }
12451
12452 return 0;
12453 out:
12454 kfree(vmid_context);
12455 return -EIO;
12456 }
12457
12458 static void
12459 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
12460 struct lpfc_iocbq *rspiocb)
12461 {
12462 struct lpfc_vport *vport = icmdiocb->vport;
12463 struct lpfc_dmabuf *prsp = NULL;
12464 struct lpfc_vmid_context *vmid_context =
12465 icmdiocb->vmid_tag.vmid_context;
12466 struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
12467 u8 *pcmd;
12468 u32 *data;
12469 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12470 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12471 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
12472 struct lpfc_vmid *vmid;
12473
12474 vmid = vmid_context->vmp;
12475 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12476 ndlp = NULL;
12477
12478 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
12479 if (!prsp)
12480 goto out;
12481 pcmd = prsp->virt;
12482 data = (u32 *)pcmd;
12483 if (data[0] == ELS_CMD_LS_RJT) {
12484 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12485 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
12486 goto out;
12487 }
12488 if (ulp_status) {
12489 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12490 "4533 UVEM error status %x: %x\n",
12491 ulp_status, ulp_word4);
12492 goto out;
12493 }
12494 spin_lock(&phba->hbalock);
12495 /* Set IN USE flag */
12496 vport->vmid_flag |= LPFC_VMID_IN_USE;
12497 phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
12498 spin_unlock(&phba->hbalock);
12499
12500 if (vmid_context->instantiated) {
12501 write_lock(&vport->vmid_lock);
12502 vmid->flag |= LPFC_VMID_REGISTERED;
12503 vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
12504 write_unlock(&vport->vmid_lock);
12505 }
12506
12507 out:
12508 kfree(vmid_context);
12509 lpfc_els_free_iocb(phba, icmdiocb);
12510 lpfc_nlp_put(ndlp);
12511 }
12512