1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
8
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
11
12 #ifdef CONFIG_PPC
13 #define IS_PPCARCH true
14 #else
15 #define IS_PPCARCH false
16 #endif
17
/*
 * Lookup table mapping selected MBC_* mailbox command opcodes to short
 * human-readable tags for trace/debug output; consumed by mb_to_str().
 * Commands not listed here print as "unknown".
 */
static struct mb_cmd_name {
	uint16_t cmd;		/* MBC_* mailbox command opcode */
	const char *str;	/* short printable name */
} mb_str[] = {
	{MBC_GET_PORT_DATABASE, "GPDB"},
	{MBC_GET_ID_LIST, "GIDList"},
	{MBC_GET_LINK_PRIV_STATS, "Stats"},
	{MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};
27
/*
 * Translate a mailbox command opcode into a short printable name.
 * Returns "unknown" for opcodes not present in mb_str[].
 */
static const char *mb_to_str(uint16_t cmd)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(mb_str); idx++) {
		if (mb_str[idx].cmd == cmd)
			return mb_str[idx].str;
	}

	return "unknown";
}
40
/*
 * Mailbox commands that are still permitted while an ISP abort is
 * pending/active (e.g. firmware load and basic state queries).
 * is_rom_cmd() consults this table; all other commands are rejected
 * with QLA_FUNCTION_TIMEOUT during abort (see qla2x00_mailbox_command).
 */
static struct rom_cmd {
	uint16_t cmd;	/* MBC_* opcode allowed during ISP abort */
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_LOAD_FLASH_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_SET_RNID_PARAMS },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};
72
/*
 * Return 1 if @cmd is in the rom_cmds[] table (i.e. it may be issued
 * while an ISP abort is in progress), 0 otherwise.
 */
static int is_rom_cmd(uint16_t cmd)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(rom_cmds); idx++)
		if (rom_cmds[idx].cmd == cmd)
			return 1;

	return 0;
}
86
87 /*
88 * qla2x00_mailbox_command
 * Issues a mailbox command and waits for completion.
90 *
91 * Input:
92 * ha = adapter block pointer.
93 * mcp = driver internal mbx struct pointer.
94 *
95 * Output:
96 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
97 *
98 * Returns:
99 * 0 : QLA_SUCCESS = cmd performed success
100 * 1 : QLA_FUNCTION_FAILED (error encountered)
101 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
102 *
103 * Context:
104 * Kernel context.
105 */
106 static int
qla2x00_mailbox_command(scsi_qla_host_t * vha,mbx_cmd_t * mcp)107 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
108 {
109 int rval, i;
110 unsigned long flags = 0;
111 device_reg_t *reg;
112 uint8_t abort_active, eeh_delay;
113 uint8_t io_lock_on;
114 uint16_t command = 0;
115 uint16_t *iptr;
116 __le16 __iomem *optr;
117 uint32_t cnt;
118 uint32_t mboxes;
119 unsigned long wait_time;
120 struct qla_hw_data *ha = vha->hw;
121 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
122 u32 chip_reset;
123
124
125 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
126
127 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
128 ql_log(ql_log_warn, vha, 0x1001,
129 "PCI channel failed permanently, exiting.\n");
130 return QLA_FUNCTION_TIMEOUT;
131 }
132
133 if (vha->device_flags & DFLG_DEV_FAILED) {
134 ql_log(ql_log_warn, vha, 0x1002,
135 "Device in failed state, exiting.\n");
136 return QLA_FUNCTION_TIMEOUT;
137 }
138
139 /* if PCI error, then avoid mbx processing.*/
140 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
141 test_bit(UNLOADING, &base_vha->dpc_flags)) {
142 ql_log(ql_log_warn, vha, 0xd04e,
143 "PCI error, exiting.\n");
144 return QLA_FUNCTION_TIMEOUT;
145 }
146 eeh_delay = 0;
147 reg = ha->iobase;
148 io_lock_on = base_vha->flags.init_done;
149
150 rval = QLA_SUCCESS;
151 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
152 chip_reset = ha->chip_reset;
153
154 if (ha->flags.pci_channel_io_perm_failure) {
155 ql_log(ql_log_warn, vha, 0x1003,
156 "Perm failure on EEH timeout MBX, exiting.\n");
157 return QLA_FUNCTION_TIMEOUT;
158 }
159
160 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
161 /* Setting Link-Down error */
162 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
163 ql_log(ql_log_warn, vha, 0x1004,
164 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
165 return QLA_FUNCTION_TIMEOUT;
166 }
167
168 /* check if ISP abort is active and return cmd with timeout */
169 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
170 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
171 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
172 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
173 ql_log(ql_log_info, vha, 0x1005,
174 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
175 mcp->mb[0]);
176 return QLA_FUNCTION_TIMEOUT;
177 }
178
179 atomic_inc(&ha->num_pend_mbx_stage1);
180 /*
181 * Wait for active mailbox commands to finish by waiting at most tov
182 * seconds. This is to serialize actual issuing of mailbox cmds during
183 * non ISP abort time.
184 */
185 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
186 /* Timeout occurred. Return error. */
187 ql_log(ql_log_warn, vha, 0xd035,
188 "Cmd access timeout, cmd=0x%x, Exiting.\n",
189 mcp->mb[0]);
190 vha->hw_err_cnt++;
191 atomic_dec(&ha->num_pend_mbx_stage1);
192 return QLA_FUNCTION_TIMEOUT;
193 }
194 atomic_dec(&ha->num_pend_mbx_stage1);
195 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
196 ha->flags.eeh_busy) {
197 ql_log(ql_log_warn, vha, 0xd035,
198 "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
199 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
200 rval = QLA_ABORTED;
201 goto premature_exit;
202 }
203
204
205 /* Save mailbox command for debug */
206 ha->mcp = mcp;
207
208 ql_dbg(ql_dbg_mbx, vha, 0x1006,
209 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
210
211 spin_lock_irqsave(&ha->hardware_lock, flags);
212
213 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
214 ha->flags.mbox_busy) {
215 rval = QLA_ABORTED;
216 spin_unlock_irqrestore(&ha->hardware_lock, flags);
217 goto premature_exit;
218 }
219 ha->flags.mbox_busy = 1;
220
221 /* Load mailbox registers. */
222 if (IS_P3P_TYPE(ha))
223 optr = ®->isp82.mailbox_in[0];
224 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
225 optr = ®->isp24.mailbox0;
226 else
227 optr = MAILBOX_REG(ha, ®->isp, 0);
228
229 iptr = mcp->mb;
230 command = mcp->mb[0];
231 mboxes = mcp->out_mb;
232
233 ql_dbg(ql_dbg_mbx, vha, 0x1111,
234 "Mailbox registers (OUT):\n");
235 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
236 if (IS_QLA2200(ha) && cnt == 8)
237 optr = MAILBOX_REG(ha, ®->isp, 8);
238 if (mboxes & BIT_0) {
239 ql_dbg(ql_dbg_mbx, vha, 0x1112,
240 "mbox[%d]<-0x%04x\n", cnt, *iptr);
241 wrt_reg_word(optr, *iptr);
242 } else {
243 wrt_reg_word(optr, 0);
244 }
245
246 mboxes >>= 1;
247 optr++;
248 iptr++;
249 }
250
251 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
252 "I/O Address = %p.\n", optr);
253
254 /* Issue set host interrupt command to send cmd out. */
255 ha->flags.mbox_int = 0;
256 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
257 reinit_completion(&ha->mbx_intr_comp);
258
259 /* Unlock mbx registers and wait for interrupt */
260 ql_dbg(ql_dbg_mbx, vha, 0x100f,
261 "Going to unlock irq & waiting for interrupts. "
262 "jiffies=%lx.\n", jiffies);
263
264 /* Wait for mbx cmd completion until timeout */
265 atomic_inc(&ha->num_pend_mbx_stage2);
266 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
267 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
268
269 if (IS_P3P_TYPE(ha))
270 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
271 else if (IS_FWI2_CAPABLE(ha))
272 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
273 else
274 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
275 spin_unlock_irqrestore(&ha->hardware_lock, flags);
276
277 wait_time = jiffies;
278 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
279 mcp->tov * HZ)) {
280 ql_dbg(ql_dbg_mbx, vha, 0x117a,
281 "cmd=%x Timeout.\n", command);
282 spin_lock_irqsave(&ha->hardware_lock, flags);
283 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
284 reinit_completion(&ha->mbx_intr_comp);
285 spin_unlock_irqrestore(&ha->hardware_lock, flags);
286
287 if (chip_reset != ha->chip_reset) {
288 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
289
290 spin_lock_irqsave(&ha->hardware_lock, flags);
291 ha->flags.mbox_busy = 0;
292 spin_unlock_irqrestore(&ha->hardware_lock,
293 flags);
294 atomic_dec(&ha->num_pend_mbx_stage2);
295 rval = QLA_ABORTED;
296 goto premature_exit;
297 }
298 } else if (ha->flags.purge_mbox ||
299 chip_reset != ha->chip_reset) {
300 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
301
302 spin_lock_irqsave(&ha->hardware_lock, flags);
303 ha->flags.mbox_busy = 0;
304 spin_unlock_irqrestore(&ha->hardware_lock, flags);
305 atomic_dec(&ha->num_pend_mbx_stage2);
306 rval = QLA_ABORTED;
307 goto premature_exit;
308 }
309
310 if (time_after(jiffies, wait_time + 5 * HZ))
311 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
312 command, jiffies_to_msecs(jiffies - wait_time));
313 } else {
314 ql_dbg(ql_dbg_mbx, vha, 0x1011,
315 "Cmd=%x Polling Mode.\n", command);
316
317 if (IS_P3P_TYPE(ha)) {
318 if (rd_reg_dword(®->isp82.hint) &
319 HINT_MBX_INT_PENDING) {
320 ha->flags.mbox_busy = 0;
321 spin_unlock_irqrestore(&ha->hardware_lock,
322 flags);
323 atomic_dec(&ha->num_pend_mbx_stage2);
324 ql_dbg(ql_dbg_mbx, vha, 0x1012,
325 "Pending mailbox timeout, exiting.\n");
326 vha->hw_err_cnt++;
327 rval = QLA_FUNCTION_TIMEOUT;
328 goto premature_exit;
329 }
330 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
331 } else if (IS_FWI2_CAPABLE(ha))
332 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
333 else
334 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
335 spin_unlock_irqrestore(&ha->hardware_lock, flags);
336
337 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
338 while (!ha->flags.mbox_int) {
339 if (ha->flags.purge_mbox ||
340 chip_reset != ha->chip_reset) {
341 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
342
343 spin_lock_irqsave(&ha->hardware_lock, flags);
344 ha->flags.mbox_busy = 0;
345 spin_unlock_irqrestore(&ha->hardware_lock,
346 flags);
347 atomic_dec(&ha->num_pend_mbx_stage2);
348 rval = QLA_ABORTED;
349 goto premature_exit;
350 }
351
352 if (time_after(jiffies, wait_time))
353 break;
354
355 /* Check for pending interrupts. */
356 qla2x00_poll(ha->rsp_q_map[0]);
357
358 if (!ha->flags.mbox_int &&
359 !(IS_QLA2200(ha) &&
360 command == MBC_LOAD_RISC_RAM_EXTENDED))
361 msleep(10);
362 } /* while */
363 ql_dbg(ql_dbg_mbx, vha, 0x1013,
364 "Waited %d sec.\n",
365 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
366 }
367 atomic_dec(&ha->num_pend_mbx_stage2);
368
369 /* Check whether we timed out */
370 if (ha->flags.mbox_int) {
371 uint16_t *iptr2;
372
373 ql_dbg(ql_dbg_mbx, vha, 0x1014,
374 "Cmd=%x completed.\n", command);
375
376 /* Got interrupt. Clear the flag. */
377 ha->flags.mbox_int = 0;
378 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
379
380 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
381 spin_lock_irqsave(&ha->hardware_lock, flags);
382 ha->flags.mbox_busy = 0;
383 spin_unlock_irqrestore(&ha->hardware_lock, flags);
384
385 /* Setting Link-Down error */
386 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
387 ha->mcp = NULL;
388 rval = QLA_FUNCTION_FAILED;
389 ql_log(ql_log_warn, vha, 0xd048,
390 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
391 goto premature_exit;
392 }
393
394 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
395 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
396 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
397 MBS_COMMAND_COMPLETE);
398 rval = QLA_FUNCTION_FAILED;
399 }
400
401 /* Load return mailbox registers. */
402 iptr2 = mcp->mb;
403 iptr = (uint16_t *)&ha->mailbox_out[0];
404 mboxes = mcp->in_mb;
405
406 ql_dbg(ql_dbg_mbx, vha, 0x1113,
407 "Mailbox registers (IN):\n");
408 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
409 if (mboxes & BIT_0) {
410 *iptr2 = *iptr;
411 ql_dbg(ql_dbg_mbx, vha, 0x1114,
412 "mbox[%d]->0x%04x\n", cnt, *iptr2);
413 }
414
415 mboxes >>= 1;
416 iptr2++;
417 iptr++;
418 }
419 } else {
420
421 uint16_t mb[8];
422 uint32_t ictrl, host_status, hccr;
423 uint16_t w;
424
425 if (IS_FWI2_CAPABLE(ha)) {
426 mb[0] = rd_reg_word(®->isp24.mailbox0);
427 mb[1] = rd_reg_word(®->isp24.mailbox1);
428 mb[2] = rd_reg_word(®->isp24.mailbox2);
429 mb[3] = rd_reg_word(®->isp24.mailbox3);
430 mb[7] = rd_reg_word(®->isp24.mailbox7);
431 ictrl = rd_reg_dword(®->isp24.ictrl);
432 host_status = rd_reg_dword(®->isp24.host_status);
433 hccr = rd_reg_dword(®->isp24.hccr);
434
435 ql_log(ql_log_warn, vha, 0xd04c,
436 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
437 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
438 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
439 mb[7], host_status, hccr);
440 vha->hw_err_cnt++;
441
442 } else {
443 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
444 ictrl = rd_reg_word(®->isp.ictrl);
445 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
446 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
447 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
448 vha->hw_err_cnt++;
449 }
450 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
451
452 /* Capture FW dump only, if PCI device active */
453 if (!pci_channel_offline(vha->hw->pdev)) {
454 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
455 if (w == 0xffff || ictrl == 0xffffffff ||
456 (chip_reset != ha->chip_reset)) {
457 /* This is special case if there is unload
458 * of driver happening and if PCI device go
459 * into bad state due to PCI error condition
460 * then only PCI ERR flag would be set.
461 * we will do premature exit for above case.
462 */
463 spin_lock_irqsave(&ha->hardware_lock, flags);
464 ha->flags.mbox_busy = 0;
465 spin_unlock_irqrestore(&ha->hardware_lock,
466 flags);
467 rval = QLA_FUNCTION_TIMEOUT;
468 goto premature_exit;
469 }
470
471 /* Attempt to capture firmware dump for further
472 * anallysis of the current formware state. we do not
473 * need to do this if we are intentionally generating
474 * a dump
475 */
476 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
477 qla2xxx_dump_fw(vha);
478 rval = QLA_FUNCTION_TIMEOUT;
479 }
480 }
481 spin_lock_irqsave(&ha->hardware_lock, flags);
482 ha->flags.mbox_busy = 0;
483 spin_unlock_irqrestore(&ha->hardware_lock, flags);
484
485 /* Clean up */
486 ha->mcp = NULL;
487
488 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
489 ql_dbg(ql_dbg_mbx, vha, 0x101a,
490 "Checking for additional resp interrupt.\n");
491
492 /* polling mode for non isp_abort commands. */
493 qla2x00_poll(ha->rsp_q_map[0]);
494 }
495
496 if (rval == QLA_FUNCTION_TIMEOUT &&
497 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
498 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
499 ha->flags.eeh_busy) {
500 /* not in dpc. schedule it for dpc to take over. */
501 ql_dbg(ql_dbg_mbx, vha, 0x101b,
502 "Timeout, schedule isp_abort_needed.\n");
503
504 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
505 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
506 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
507 if (IS_QLA82XX(ha)) {
508 ql_dbg(ql_dbg_mbx, vha, 0x112a,
509 "disabling pause transmit on port "
510 "0 & 1.\n");
511 qla82xx_wr_32(ha,
512 QLA82XX_CRB_NIU + 0x98,
513 CRB_NIU_XG_PAUSE_CTL_P0|
514 CRB_NIU_XG_PAUSE_CTL_P1);
515 }
516 ql_log(ql_log_info, base_vha, 0x101c,
517 "Mailbox cmd timeout occurred, cmd=0x%x, "
518 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
519 "abort.\n", command, mcp->mb[0],
520 ha->flags.eeh_busy);
521 vha->hw_err_cnt++;
522 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
523 qla2xxx_wake_dpc(vha);
524 }
525 } else if (current == ha->dpc_thread) {
526 /* call abort directly since we are in the DPC thread */
527 ql_dbg(ql_dbg_mbx, vha, 0x101d,
528 "Timeout, calling abort_isp.\n");
529
530 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
531 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
532 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
533 if (IS_QLA82XX(ha)) {
534 ql_dbg(ql_dbg_mbx, vha, 0x112b,
535 "disabling pause transmit on port "
536 "0 & 1.\n");
537 qla82xx_wr_32(ha,
538 QLA82XX_CRB_NIU + 0x98,
539 CRB_NIU_XG_PAUSE_CTL_P0|
540 CRB_NIU_XG_PAUSE_CTL_P1);
541 }
542 ql_log(ql_log_info, base_vha, 0x101e,
543 "Mailbox cmd timeout occurred, cmd=0x%x, "
544 "mb[0]=0x%x. Scheduling ISP abort ",
545 command, mcp->mb[0]);
546 vha->hw_err_cnt++;
547 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
548 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
549 /* Allow next mbx cmd to come in. */
550 complete(&ha->mbx_cmd_comp);
551 if (ha->isp_ops->abort_isp(vha) &&
552 !ha->flags.eeh_busy) {
553 /* Failed. retry later. */
554 set_bit(ISP_ABORT_NEEDED,
555 &vha->dpc_flags);
556 }
557 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
558 ql_dbg(ql_dbg_mbx, vha, 0x101f,
559 "Finished abort_isp.\n");
560 goto mbx_done;
561 }
562 }
563 }
564
565 premature_exit:
566 /* Allow next mbx cmd to come in. */
567 complete(&ha->mbx_cmd_comp);
568
569 mbx_done:
570 if (rval == QLA_ABORTED) {
571 ql_log(ql_log_info, vha, 0xd035,
572 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
573 mcp->mb[0]);
574 } else if (rval) {
575 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
576 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
577 dev_name(&ha->pdev->dev), 0x1020+0x800,
578 vha->host_no, rval);
579 mboxes = mcp->in_mb;
580 cnt = 4;
581 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
582 if (mboxes & BIT_0) {
583 printk(" mb[%u]=%x", i, mcp->mb[i]);
584 cnt--;
585 }
586 pr_warn(" cmd=%x ****\n", command);
587 }
588 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
589 ql_dbg(ql_dbg_mbx, vha, 0x1198,
590 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
591 rd_reg_dword(®->isp24.host_status),
592 rd_reg_dword(®->isp24.ictrl),
593 rd_reg_dword(®->isp24.istatus));
594 } else {
595 ql_dbg(ql_dbg_mbx, vha, 0x1206,
596 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
597 rd_reg_word(®->isp.ctrl_status),
598 rd_reg_word(®->isp.ictrl),
599 rd_reg_word(®->isp.istatus));
600 }
601 } else {
602 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
603 }
604
605 i = 500;
606 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
607 /*
608 * The caller of this mailbox encounter pci error.
609 * Hold the thread until PCIE link reset complete to make
610 * sure caller does not unmap dma while recovery is
611 * in progress.
612 */
613 msleep(1);
614 i--;
615 }
616 return rval;
617 }
618
619 int
qla2x00_load_ram(scsi_qla_host_t * vha,dma_addr_t req_dma,uint32_t risc_addr,uint32_t risc_code_size)620 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
621 uint32_t risc_code_size)
622 {
623 int rval;
624 struct qla_hw_data *ha = vha->hw;
625 mbx_cmd_t mc;
626 mbx_cmd_t *mcp = &mc;
627
628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
629 "Entered %s.\n", __func__);
630
631 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
632 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
633 mcp->mb[8] = MSW(risc_addr);
634 mcp->out_mb = MBX_8|MBX_0;
635 } else {
636 mcp->mb[0] = MBC_LOAD_RISC_RAM;
637 mcp->out_mb = MBX_0;
638 }
639 mcp->mb[1] = LSW(risc_addr);
640 mcp->mb[2] = MSW(req_dma);
641 mcp->mb[3] = LSW(req_dma);
642 mcp->mb[6] = MSW(MSD(req_dma));
643 mcp->mb[7] = LSW(MSD(req_dma));
644 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
645 if (IS_FWI2_CAPABLE(ha)) {
646 mcp->mb[4] = MSW(risc_code_size);
647 mcp->mb[5] = LSW(risc_code_size);
648 mcp->out_mb |= MBX_5|MBX_4;
649 } else {
650 mcp->mb[4] = LSW(risc_code_size);
651 mcp->out_mb |= MBX_4;
652 }
653
654 mcp->in_mb = MBX_1|MBX_0;
655 mcp->tov = MBX_TOV_SECONDS;
656 mcp->flags = 0;
657 rval = qla2x00_mailbox_command(vha, mcp);
658
659 if (rval != QLA_SUCCESS) {
660 ql_dbg(ql_dbg_mbx, vha, 0x1023,
661 "Failed=%x mb[0]=%x mb[1]=%x.\n",
662 rval, mcp->mb[0], mcp->mb[1]);
663 vha->hw_err_cnt++;
664 } else {
665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
666 "Done %s.\n", __func__);
667 }
668
669 return rval;
670 }
671
672 #define NVME_ENABLE_FLAG BIT_3
673 #define EDIF_HW_SUPPORT BIT_10
674
675 /*
676 * qla2x00_execute_fw
677 * Start adapter firmware.
678 *
679 * Input:
680 * ha = adapter block pointer.
681 * TARGET_QUEUE_LOCK must be released.
682 * ADAPTER_STATE_LOCK must be released.
683 *
684 * Returns:
685 * qla2x00 local function return status code.
686 *
687 * Context:
688 * Kernel context.
689 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	/* When set, request firmware to force-acquire its semaphore on the
	 * retry path (28xx only, after a 0x27 command error). */
	u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
	/* Retry budget shared by both the semaphore retry and the generic
	 * failure retry below. */
	u8 retry = 5;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

again:
	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		/* FWI2: risc_addr is a full 32-bit address in mb[1]/mb[2]. */
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;

		/* Enable BPM? */
		if (ha->flags.lr_detected) {
			mcp->mb[4] = BIT_0;
			if (IS_BPM_RANGE_CAPABLE(ha))
				mcp->mb[4] |=
				    ha->lr_distance << LR_DIST_FW_POS;
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				/* NOTE(review): BIT_5 here where siblings use
				 * MBX_5 — confirm the two macros share the
				 * same value/intent. */
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}

			if (IS_PPCARCH)
				mcp->mb[11] |= BIT_4;
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		if (semaphore)
			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
	} else {
		/* Legacy ISPs: 16-bit risc address only. */
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/* 28xx reports 0x27 in mb[1] when the firmware semaphore is
		 * stuck; retry once with the force-semaphore bit set. */
		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
		    mcp->mb[1] == 0x27 && retry) {
			semaphore = 1;
			retry--;
			ql_dbg(ql_dbg_async, vha, 0x1026,
			    "Exe FW: force semaphore.\n");
			goto again;
		}

		if (retry) {
			retry--;
			ql_dbg(ql_dbg_async, vha, 0x509d,
			    "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
			goto again;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		vha->hw_err_cnt++;
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	/* Harvest firmware-reported capabilities from the reply mailboxes. */
	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

	if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
		ha->flags.edif_hw = 1;
		ql_log(ql_log_info, vha, 0xffff,
		    "%s: edif HW\n", __func__);
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}
827
828 /*
829 * qla2x00_load_flash_firmware
830 * Load firmware from flash.
831 *
832 * Input:
833 * vha = adapter block pointer.
834 *
835 * Returns:
836 * qla28xx local function return status code.
837 *
838 * Context:
839 * Kernel context.
840 */
841 int
qla28xx_load_flash_firmware(scsi_qla_host_t * vha)842 qla28xx_load_flash_firmware(scsi_qla_host_t *vha)
843 {
844 struct qla_hw_data *ha = vha->hw;
845 int rval = QLA_COMMAND_ERROR;
846 mbx_cmd_t mc;
847 mbx_cmd_t *mcp = &mc;
848
849 if (!IS_QLA28XX(ha))
850 return rval;
851
852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a6,
853 "Entered %s.\n", __func__);
854
855 mcp->mb[0] = MBC_LOAD_FLASH_FIRMWARE;
856 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
857 mcp->in_mb = MBX_0;
858 mcp->tov = MBX_TOV_SECONDS;
859 mcp->flags = 0;
860 rval = qla2x00_mailbox_command(vha, mcp);
861
862 if (rval != QLA_SUCCESS) {
863 ql_dbg(ql_log_info, vha, 0x11a7,
864 "Failed=%x cmd error=%x img error=%x.\n",
865 rval, mcp->mb[1], mcp->mb[2]);
866 } else {
867 ql_dbg(ql_log_info, vha, 0x11a8,
868 "Done %s.\n", __func__);
869 }
870
871 return rval;
872 }
873
874
875 /*
876 * qla_get_exlogin_status
877 * Get extended login status
878 * uses the memory offload control/status Mailbox
879 *
880 * Input:
881 * ha: adapter state pointer.
882 * fwopt: firmware options
883 *
884 * Returns:
885 * qla2x00 local function status
886 *
887 * Context:
888 * Kernel context.
889 */
890 #define FETCH_XLOGINS_STAT 0x8
891 int
qla_get_exlogin_status(scsi_qla_host_t * vha,uint16_t * buf_sz,uint16_t * ex_logins_cnt)892 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
893 uint16_t *ex_logins_cnt)
894 {
895 int rval;
896 mbx_cmd_t mc;
897 mbx_cmd_t *mcp = &mc;
898
899 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
900 "Entered %s\n", __func__);
901
902 memset(mcp->mb, 0 , sizeof(mcp->mb));
903 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
904 mcp->mb[1] = FETCH_XLOGINS_STAT;
905 mcp->out_mb = MBX_1|MBX_0;
906 mcp->in_mb = MBX_10|MBX_4|MBX_0;
907 mcp->tov = MBX_TOV_SECONDS;
908 mcp->flags = 0;
909
910 rval = qla2x00_mailbox_command(vha, mcp);
911 if (rval != QLA_SUCCESS) {
912 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
913 } else {
914 *buf_sz = mcp->mb[4];
915 *ex_logins_cnt = mcp->mb[10];
916
917 ql_log(ql_log_info, vha, 0x1190,
918 "buffer size 0x%x, exchange login count=%d\n",
919 mcp->mb[4], mcp->mb[10]);
920
921 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
922 "Done %s.\n", __func__);
923 }
924
925 return rval;
926 }
927
928 /*
929 * qla_set_exlogin_mem_cfg
930 * set extended login memory configuration
 * Mbx needs to be issued before init_cb is set
932 *
933 * Input:
934 * ha: adapter state pointer.
935 * buffer: buffer pointer
936 * phys_addr: physical address of buffer
937 * size: size of buffer
938 * TARGET_QUEUE_LOCK must be released
 * ADAPTER_STATE_LOCK must be released
940 *
941 * Returns:
 * qla2x00 local function status code.
943 *
944 * Context:
945 * Kernel context.
946 */
947 #define CONFIG_XLOGINS_MEM 0x9
948 int
qla_set_exlogin_mem_cfg(scsi_qla_host_t * vha,dma_addr_t phys_addr)949 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
950 {
951 int rval;
952 mbx_cmd_t mc;
953 mbx_cmd_t *mcp = &mc;
954 struct qla_hw_data *ha = vha->hw;
955
956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
957 "Entered %s.\n", __func__);
958
959 memset(mcp->mb, 0 , sizeof(mcp->mb));
960 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
961 mcp->mb[1] = CONFIG_XLOGINS_MEM;
962 mcp->mb[2] = MSW(phys_addr);
963 mcp->mb[3] = LSW(phys_addr);
964 mcp->mb[6] = MSW(MSD(phys_addr));
965 mcp->mb[7] = LSW(MSD(phys_addr));
966 mcp->mb[8] = MSW(ha->exlogin_size);
967 mcp->mb[9] = LSW(ha->exlogin_size);
968 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
969 mcp->in_mb = MBX_11|MBX_0;
970 mcp->tov = MBX_TOV_SECONDS;
971 mcp->flags = 0;
972 rval = qla2x00_mailbox_command(vha, mcp);
973 if (rval != QLA_SUCCESS) {
974 ql_dbg(ql_dbg_mbx, vha, 0x111b,
975 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
976 rval, mcp->mb[0], mcp->mb[11]);
977 } else {
978 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
979 "Done %s.\n", __func__);
980 }
981
982 return rval;
983 }
984
985 /*
986 * qla_get_exchoffld_status
987 * Get exchange offload status
988 * uses the memory offload control/status Mailbox
989 *
990 * Input:
991 * ha: adapter state pointer.
992 * fwopt: firmware options
993 *
994 * Returns:
995 * qla2x00 local function status
996 *
997 * Context:
998 * Kernel context.
999 */
1000 #define FETCH_XCHOFFLD_STAT 0x2
1001 int
qla_get_exchoffld_status(scsi_qla_host_t * vha,uint16_t * buf_sz,uint16_t * ex_logins_cnt)1002 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
1003 uint16_t *ex_logins_cnt)
1004 {
1005 int rval;
1006 mbx_cmd_t mc;
1007 mbx_cmd_t *mcp = &mc;
1008
1009 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
1010 "Entered %s\n", __func__);
1011
1012 memset(mcp->mb, 0 , sizeof(mcp->mb));
1013 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1014 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
1015 mcp->out_mb = MBX_1|MBX_0;
1016 mcp->in_mb = MBX_10|MBX_4|MBX_0;
1017 mcp->tov = MBX_TOV_SECONDS;
1018 mcp->flags = 0;
1019
1020 rval = qla2x00_mailbox_command(vha, mcp);
1021 if (rval != QLA_SUCCESS) {
1022 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
1023 } else {
1024 *buf_sz = mcp->mb[4];
1025 *ex_logins_cnt = mcp->mb[10];
1026
1027 ql_log(ql_log_info, vha, 0x118e,
1028 "buffer size 0x%x, exchange offload count=%d\n",
1029 mcp->mb[4], mcp->mb[10]);
1030
1031 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
1032 "Done %s.\n", __func__);
1033 }
1034
1035 return rval;
1036 }
1037
1038 /*
1039 * qla_set_exchoffld_mem_cfg
1040 * Set exchange offload memory configuration
 * Mbx needs to be issued before init_cb is set
1042 *
1043 * Input:
1044 * ha: adapter state pointer.
1045 * buffer: buffer pointer
1046 * phys_addr: physical address of buffer
1047 * size: size of buffer
1048 * TARGET_QUEUE_LOCK must be released
 * ADAPTER_STATE_LOCK must be released
1050 *
1051 * Returns:
 * qla2x00 local function status code.
1053 *
1054 * Context:
1055 * Kernel context.
1056 */
1057 #define CONFIG_XCHOFFLD_MEM 0x3
1058 int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t * vha)1059 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
1060 {
1061 int rval;
1062 mbx_cmd_t mc;
1063 mbx_cmd_t *mcp = &mc;
1064 struct qla_hw_data *ha = vha->hw;
1065
1066 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1067 "Entered %s.\n", __func__);
1068
1069 memset(mcp->mb, 0 , sizeof(mcp->mb));
1070 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1071 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1072 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1073 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1074 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1075 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1076 mcp->mb[8] = MSW(ha->exchoffld_size);
1077 mcp->mb[9] = LSW(ha->exchoffld_size);
1078 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1079 mcp->in_mb = MBX_11|MBX_0;
1080 mcp->tov = MBX_TOV_SECONDS;
1081 mcp->flags = 0;
1082 rval = qla2x00_mailbox_command(vha, mcp);
1083 if (rval != QLA_SUCCESS) {
1084 /*EMPTY*/
1085 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1086 } else {
1087 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1088 "Done %s.\n", __func__);
1089 }
1090
1091 return rval;
1092 }
1093
1094 /*
1095 * qla2x00_get_fw_version
1096 * Get firmware version.
1097 *
1098 * Input:
1099 * ha: adapter state pointer.
1100 * major: pointer for major number.
1101 * minor: pointer for minor number.
1102 * subminor: pointer for subminor number.
1103 *
1104 * Returns:
1105 * qla2x00 local function return status code.
1106 *
1107 * Context:
1108 * Kernel context.
1109 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	/* 81xx/8031/8044 additionally return MPI and PHY versions. */
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	/* FWI2-capable parts return extended firmware attributes. */
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	/* 27xx/28xx return serdes/PEP versions and shared/DDR RAM ranges. */
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
			ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}

		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
			ql_log(ql_log_info, vha, 0xd302,
			    "Firmware supports NVMe2 0x%x\n",
			    ha->fw_attributes_ext[0]);
			vha->flags.nvme2_enabled = 1;
		}

		/* EDIF needs hw support, FW attribute and module parameter. */
		if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
			ha->flags.edif_enabled = 1;
			ql_log(ql_log_info, vha, 0xffff,
			    "%s: edif is enabled\n", __func__);
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			/* BIT_10 of mb16 reports secure flash update support. */
			if (mcp->mb[16] & BIT_10)
				ha->flags.secure_fw = 1;

			ql_log(ql_log_info, vha, 0xffff,
			    "Secure Flash Update in FW: %s\n",
			    (ha->flags.secure_fw) ? "Supported" :
			    "Not Supported");
		}

		/* SCM needs both app announcement and FW attribute. */
		if (ha->flags.scm_supported_a &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
			ha->flags.scm_supported_f = 1;
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
		}
		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
		    (ha->flags.scm_supported_f) ? "Supported" :
		    "Not Supported");

		if (vha->flags.nvme2_enabled) {
			/* set BIT_15 of special feature control block for SLER */
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
			/* set BIT_14 of special feature control block for PI CTRL*/
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}
1264
1265 /*
1266 * qla2x00_get_fw_options
 *	Get firmware options.
1268 *
1269 * Input:
1270 * ha = adapter block pointer.
1271 * fwopt = pointer for firmware options.
1272 *
1273 * Returns:
1274 * qla2x00 local function return status code.
1275 *
1276 * Context:
1277 * Kernel context.
1278 */
1279 int
qla2x00_get_fw_options(scsi_qla_host_t * vha,uint16_t * fwopts)1280 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1281 {
1282 int rval;
1283 mbx_cmd_t mc;
1284 mbx_cmd_t *mcp = &mc;
1285
1286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1287 "Entered %s.\n", __func__);
1288
1289 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1290 mcp->out_mb = MBX_0;
1291 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1292 mcp->tov = MBX_TOV_SECONDS;
1293 mcp->flags = 0;
1294 rval = qla2x00_mailbox_command(vha, mcp);
1295
1296 if (rval != QLA_SUCCESS) {
1297 /*EMPTY*/
1298 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1299 } else {
1300 fwopts[0] = mcp->mb[0];
1301 fwopts[1] = mcp->mb[1];
1302 fwopts[2] = mcp->mb[2];
1303 fwopts[3] = mcp->mb[3];
1304
1305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1306 "Done %s.\n", __func__);
1307 }
1308
1309 return rval;
1310 }
1311
1312
1313 /*
1314 * qla2x00_set_fw_options
1315 * Set firmware options.
1316 *
1317 * Input:
1318 * ha = adapter block pointer.
1319 * fwopt = pointer for firmware options.
1320 *
1321 * Returns:
1322 * qla2x00 local function return status code.
1323 *
1324 * Context:
1325 * Kernel context.
1326 */
1327 int
qla2x00_set_fw_options(scsi_qla_host_t * vha,uint16_t * fwopts)1328 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1329 {
1330 int rval;
1331 mbx_cmd_t mc;
1332 mbx_cmd_t *mcp = &mc;
1333
1334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1335 "Entered %s.\n", __func__);
1336
1337 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1338 mcp->mb[1] = fwopts[1];
1339 mcp->mb[2] = fwopts[2];
1340 mcp->mb[3] = fwopts[3];
1341 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1342 mcp->in_mb = MBX_0;
1343 if (IS_FWI2_CAPABLE(vha->hw)) {
1344 mcp->in_mb |= MBX_1;
1345 mcp->mb[10] = fwopts[10];
1346 mcp->out_mb |= MBX_10;
1347 } else {
1348 mcp->mb[10] = fwopts[10];
1349 mcp->mb[11] = fwopts[11];
1350 mcp->mb[12] = 0; /* Undocumented, but used */
1351 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1352 }
1353 mcp->tov = MBX_TOV_SECONDS;
1354 mcp->flags = 0;
1355 rval = qla2x00_mailbox_command(vha, mcp);
1356
1357 fwopts[0] = mcp->mb[0];
1358
1359 if (rval != QLA_SUCCESS) {
1360 /*EMPTY*/
1361 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1362 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1363 } else {
1364 /*EMPTY*/
1365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1366 "Done %s.\n", __func__);
1367 }
1368
1369 return rval;
1370 }
1371
1372 /*
1373 * qla2x00_mbx_reg_test
1374 * Mailbox register wrap test.
1375 *
1376 * Input:
1377 * ha = adapter block pointer.
1378 * TARGET_QUEUE_LOCK must be released.
1379 * ADAPTER_STATE_LOCK must be released.
1380 *
1381 * Returns:
1382 * qla2x00 local function return status code.
1383 *
1384 * Context:
1385 * Kernel context.
1386 */
1387 int
qla2x00_mbx_reg_test(scsi_qla_host_t * vha)1388 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1389 {
1390 int rval;
1391 mbx_cmd_t mc;
1392 mbx_cmd_t *mcp = &mc;
1393
1394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1395 "Entered %s.\n", __func__);
1396
1397 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1398 mcp->mb[1] = 0xAAAA;
1399 mcp->mb[2] = 0x5555;
1400 mcp->mb[3] = 0xAA55;
1401 mcp->mb[4] = 0x55AA;
1402 mcp->mb[5] = 0xA5A5;
1403 mcp->mb[6] = 0x5A5A;
1404 mcp->mb[7] = 0x2525;
1405 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1406 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1407 mcp->tov = MBX_TOV_SECONDS;
1408 mcp->flags = 0;
1409 rval = qla2x00_mailbox_command(vha, mcp);
1410
1411 if (rval == QLA_SUCCESS) {
1412 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1413 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1414 rval = QLA_FUNCTION_FAILED;
1415 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1416 mcp->mb[7] != 0x2525)
1417 rval = QLA_FUNCTION_FAILED;
1418 }
1419
1420 if (rval != QLA_SUCCESS) {
1421 /*EMPTY*/
1422 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1423 vha->hw_err_cnt++;
1424 } else {
1425 /*EMPTY*/
1426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1427 "Done %s.\n", __func__);
1428 }
1429
1430 return rval;
1431 }
1432
1433 /*
1434 * qla2x00_verify_checksum
1435 * Verify firmware checksum.
1436 *
1437 * Input:
1438 * ha = adapter block pointer.
1439 * TARGET_QUEUE_LOCK must be released.
1440 * ADAPTER_STATE_LOCK must be released.
1441 *
1442 * Returns:
1443 * qla2x00 local function return status code.
1444 *
1445 * Context:
1446 * Kernel context.
1447 */
1448 int
qla2x00_verify_checksum(scsi_qla_host_t * vha,uint32_t risc_addr)1449 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1450 {
1451 int rval;
1452 mbx_cmd_t mc;
1453 mbx_cmd_t *mcp = &mc;
1454
1455 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1456 "Entered %s.\n", __func__);
1457
1458 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1459 mcp->out_mb = MBX_0;
1460 mcp->in_mb = MBX_0;
1461 if (IS_FWI2_CAPABLE(vha->hw)) {
1462 mcp->mb[1] = MSW(risc_addr);
1463 mcp->mb[2] = LSW(risc_addr);
1464 mcp->out_mb |= MBX_2|MBX_1;
1465 mcp->in_mb |= MBX_2|MBX_1;
1466 } else {
1467 mcp->mb[1] = LSW(risc_addr);
1468 mcp->out_mb |= MBX_1;
1469 mcp->in_mb |= MBX_1;
1470 }
1471
1472 mcp->tov = MBX_TOV_SECONDS;
1473 mcp->flags = 0;
1474 rval = qla2x00_mailbox_command(vha, mcp);
1475
1476 if (rval != QLA_SUCCESS) {
1477 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1478 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1479 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1480 } else {
1481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1482 "Done %s.\n", __func__);
1483 }
1484
1485 return rval;
1486 }
1487
1488 /*
1489 * qla2x00_issue_iocb
1490 * Issue IOCB using mailbox command
1491 *
1492 * Input:
1493 * ha = adapter state pointer.
1494 * buffer = buffer pointer.
1495 * phys_addr = physical address of buffer.
1496 * size = size of buffer.
1497 * TARGET_QUEUE_LOCK must be released.
1498 * ADAPTER_STATE_LOCK must be released.
1499 *
1500 * Returns:
1501 * qla2x00 local function return status code.
1502 *
1503 * Context:
1504 * Kernel context.
1505 */
1506 int
qla2x00_issue_iocb_timeout(scsi_qla_host_t * vha,void * buffer,dma_addr_t phys_addr,size_t size,uint32_t tov)1507 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1508 dma_addr_t phys_addr, size_t size, uint32_t tov)
1509 {
1510 int rval;
1511 mbx_cmd_t mc;
1512 mbx_cmd_t *mcp = &mc;
1513
1514 if (!vha->hw->flags.fw_started)
1515 return QLA_INVALID_COMMAND;
1516
1517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1518 "Entered %s.\n", __func__);
1519
1520 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1521 mcp->mb[1] = 0;
1522 mcp->mb[2] = MSW(LSD(phys_addr));
1523 mcp->mb[3] = LSW(LSD(phys_addr));
1524 mcp->mb[6] = MSW(MSD(phys_addr));
1525 mcp->mb[7] = LSW(MSD(phys_addr));
1526 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1527 mcp->in_mb = MBX_1|MBX_0;
1528 mcp->tov = tov;
1529 mcp->flags = 0;
1530 rval = qla2x00_mailbox_command(vha, mcp);
1531
1532 if (rval != QLA_SUCCESS) {
1533 /*EMPTY*/
1534 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1535 } else {
1536 sts_entry_t *sts_entry = buffer;
1537
1538 /* Mask reserved bits. */
1539 sts_entry->entry_status &=
1540 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1541 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1542 "Done %s (status=%x).\n", __func__,
1543 sts_entry->entry_status);
1544 }
1545
1546 return rval;
1547 }
1548
/* Issue an IOCB via mailbox using the default mailbox timeout. */
int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}
1556
1557 /*
1558 * qla2x00_abort_command
1559 * Abort command aborts a specified IOCB.
1560 *
1561 * Input:
1562 * ha = adapter block pointer.
1563 * sp = SB structure pointer.
1564 *
1565 * Returns:
1566 * qla2x00 local function return status code.
1567 *
1568 * Context:
1569 * Kernel context.
1570 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	/* Use the qpair's request queue when the srb has one. */
	if (sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	/*
	 * Look up the firmware handle of the outstanding command under
	 * hardware_lock; handle 0 is reserved.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	/* Loop ID placement depends on extended-ID support. */
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	/* Handle is split across mb2 (low word) and mb3 (high word). */
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}
1628
/*
 * Issue MBC_ABORT_TARGET for @fcport, then post a MK_SYNC_ID marker IOCB.
 * Note: the return value reflects only the abort mailbox command; a marker
 * failure is logged but not propagated.
 */
int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	/* Loop ID placement depends on extended-ID support. */
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}
1676
/*
 * Issue MBC_LUN_RESET for LUN @l on @fcport, then post a MK_SYNC_ID_LUN
 * marker IOCB. Only the reset mailbox status is returned; a marker failure
 * is logged but not propagated.
 */
int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Loop ID placement depends on extended-ID support. */
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	/* Mailbox register is 16 bits; only the low part of the LUN fits. */
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}
1721
1722 /*
1723 * qla2x00_get_adapter_id
1724 * Get adapter ID and topology.
1725 *
1726 * Input:
1727 * ha = adapter block pointer.
1728 * id = pointer for loop ID.
1729 * al_pa = pointer for AL_PA.
1730 * area = pointer for area.
1731 * domain = pointer for domain.
1732 * top = pointer for topology.
1733 * TARGET_QUEUE_LOCK must be released.
1734 * ADAPTER_STATE_LOCK must be released.
1735 *
1736 * Returns:
1737 * qla2x00 local function return status code.
1738 *
1739 * Context:
1740 * Kernel context.
1741 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* CNA parts also return FCoE VLAN/FCF/VN-port MAC data. */
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	/* FWI2 parts may return an FA-WWN in mb16..mb19. */
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	/* 27xx/28xx return BBCR and SCM (EDC/RDF) status. */
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
		mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	/* Translate specific firmware completion codes. */
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data (written even on failure; callers check rval). */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			/* VN-port MAC arrives in reverse byte order. */
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			/* BIT_14 of mb7 signals a fabric-assigned WWN. */
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
			vha->bbcr = mcp->mb[15];
			if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
				ql_log(ql_log_info, vha, 0x11a4,
				    "SCM: EDC ELS completed, flags 0x%x\n",
				    mcp->mb[21]);
			}
			if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
				vha->hw->flags.scm_enabled = 1;
				vha->scm_fabric_connection_flags |=
				    SCM_FLAG_RDF_COMPLETED;
				ql_log(ql_log_info, vha, 0x11a5,
				    "SCM: RDF ELS completed, flags 0x%x\n",
				    mcp->mb[23]);
			}
		}
	}

	return rval;
}
1836
1837 /*
1838 * qla2x00_get_retry_cnt
1839 * Get current firmware login retry count and delay.
1840 *
1841 * Input:
1842 * ha = adapter block pointer.
1843 * retry_cnt = pointer to login retry count.
1844 * tov = pointer to login timeout value.
1845 *
1846 * Returns:
1847 * qla2x00 local function return status code.
1848 *
1849 * Context:
1850 * Kernel context.
1851 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
		/*
		 * retry_cnt/tov are in-out: current values are read and
		 * only overwritten when the firmware's product is larger.
		 */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}
1891
1892 /*
1893 * qla2x00_init_firmware
1894 * Initialize adapter firmware.
1895 *
1896 * Input:
1897 * ha = adapter block pointer.
1898 * dptr = Initialization control block pointer.
1899 * size = size of initialization control block.
1900 * TARGET_QUEUE_LOCK must be released.
1901 * ADAPTER_STATE_LOCK must be released.
1902 *
1903 * Returns:
1904 * qla2x00 local function return status code.
1905 *
1906 * Context:
1907 * Kernel context.
1908 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	/* P3P parts with doorbell-write enabled ring the doorbell first. */
	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	/* NPIV-capable firmware uses the MID variant of the command. */
	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	/* 64-bit DMA address of the initialization control block. */
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Optionally chain the extended init control block (mb1 BIT_0). */
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}

	/* Chain the special feature control block for SCM/NVMe2 (mb1 BIT_1). */
	if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
		mcp->mb[1] |= BIT_1;
		mcp->mb[16] = MSW(ha->sf_init_cb_dma);
		mcp->mb[17] = LSW(ha->sf_init_cb_dma);
		mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[15] = sizeof(*ha->sf_init_cb);
		mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
	}

	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		/* Dump the control blocks to aid post-mortem debugging. */
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}
1992
1993
1994 /*
1995 * qla2x00_get_port_database
1996 * Issue normal/enhanced get port database mailbox command
1997 * and copy device name as necessary.
1998 *
1999 * Input:
2000 * ha = adapter state pointer.
2001 * dev = structure pointer.
2002 * opt = enhanced cmd option byte.
2003 *
2004 * Returns:
2005 * qla2x00 local function return status code.
2006 *
2007 * Context:
2008 * Kernel context.
2009 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	/* Pre-FWI2 parts use the enhanced variant when an option is given. */
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	/* Loop ID / option placement varies per ISP generation. */
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		/* Reinterpret the buffer using the 24xx layout. */
		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		/* NVMe targets keep their login state in the high nibble. */
		if (NVME_TARGET(ha, fcport)) {
			current_login_state = pd24->current_login_state >> 4;
			last_login_state = pd24->last_login_state >> 4;
		} else {
			current_login_state = pd24->current_login_state & 0xf;
			last_login_state = pd24->last_login_state & 0xf;
		}
		fcport->current_login_state = pd24->current_login_state;
		fcport->last_login_state = pd24->last_login_state;

		/* Check for logged in state. */
		if (current_login_state != PDS_PRLI_COMPLETE &&
		    last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
			    current_login_state, last_login_state,
			    fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;

			/* When querying, still copy the data below. */
			if (!fcport->query)
				goto gpd_error_out;
		}

		/* A non-zero port name that no longer matches means the
		 * device changed behind this loop ID. */
		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;

		/* BIT_7 of word 3 reports confirmed-completion support. */
		if (pd24->prli_svc_param_word_3[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	} else {
		uint64_t zero = 0;

		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			ql_dbg(ql_dbg_mbx, vha, 0x100a,
			    "Unable to verify login-state (%x/%x) - "
			    "portid=%02x%02x%02x.\n", pd->master_state,
			    pd->slave_state, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;
	}

gpd_error_out:
	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
	fcport->query = 0;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1052,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
		    mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
		    "Done %s.\n", __func__);
	}

	return rval;
}
2183
/*
 * qla24xx_get_port_database
 *	Issue Get Port Database (MBC_GET_PORT_DATABASE) for an N_Port handle
 *	and fill @pdb with the raw 24xx-format port database.
 *
 * Input:
 *	vha = adapter state pointer.
 *	nport_handle = firmware N_Port handle of the remote port.
 *	pdb = caller-supplied buffer; streaming-DMA mapped for the duration
 *	      of the command and unmapped before return.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
    struct port_database_24xx *pdb)
{
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t pdb_dma;
	int rval;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
	    "Entered %s.\n", __func__);

	memset(pdb, 0, sizeof(*pdb));

	/* Map the caller's buffer directly rather than bouncing through the
	 * shared DMA pool; must be unmapped on every exit path below. */
	pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
	    sizeof(*pdb), DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) {
		ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	mcp->mb[1] = nport_handle;
	/* 64-bit DMA address split across mb[2]/mb[3] (low dword) and
	 * mb[6]/mb[7] (high dword). */
	mcp->mb[2] = MSW(LSD(pdb_dma));
	mcp->mb[3] = LSW(LSD(pdb_dma));
	mcp->mb[6] = MSW(MSD(pdb_dma));
	mcp->mb[7] = LSW(MSD(pdb_dma));
	mcp->mb[9] = 0;
	mcp->mb[10] = 0;
	mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->buf_size = sizeof(*pdb);
	mcp->flags = MBX_DMA_IN;
	mcp->tov = vha->hw->login_timeout * 2;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111a,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
	    sizeof(*pdb), DMA_FROM_DEVICE);

	return rval;
}
2234
2235 /*
2236 * qla2x00_get_firmware_state
2237 * Get adapter firmware state.
2238 *
2239 * Input:
2240 * ha = adapter block pointer.
2241 * dptr = pointer for firmware state.
2242 * TARGET_QUEUE_LOCK must be released.
2243 * ADAPTER_STATE_LOCK must be released.
2244 *
2245 * Returns:
2246 * qla2x00 local function return status code.
2247 *
2248 * Context:
2249 * Kernel context.
2250 */
int
qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
	    "Entered %s.\n", __func__);

	/* Nothing to query if firmware is not running. */
	if (!ha->flags.fw_started)
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	/* FWI2-capable firmware returns six state words; older parts one. */
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	else
		mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return firmware states.  NOTE(review): states[] is populated even
	 * when the command failed (historical behavior); callers must check
	 * rval before trusting the contents. */
	states[0] = mcp->mb[1];
	if (IS_FWI2_CAPABLE(vha->hw)) {
		states[1] = mcp->mb[2];
		states[2] = mcp->mb[3];	/* SFP info */
		states[3] = mcp->mb[4];
		states[4] = mcp->mb[5];
		states[5] = mcp->mb[6];	/* DPORT status */
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			/* Flag SFP problems reported in the state words. */
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119e,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
		    "Done %s.\n", __func__);
	}

	return rval;
}
2300
2301 /*
2302 * qla2x00_get_port_name
2303 * Issue get port name mailbox command.
2304 * Returned name is in big endian format.
2305 *
2306 * Input:
2307 * ha = adapter block pointer.
2308 * loop_id = loop ID of device.
2309 * name = pointer for name.
2310 * TARGET_QUEUE_LOCK must be released.
2311 * ADAPTER_STATE_LOCK must be released.
2312 *
2313 * Returns:
2314 * qla2x00 local function return status code.
2315 *
2316 * Context:
2317 * Kernel context.
2318 */
int
qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
    uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_PORT_NAME;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	/* Extended-ID adapters carry loop_id and options in separate
	 * mailboxes; legacy parts pack both into mb[1]. */
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = loop_id << 8 | opt;
	}

	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
	} else {
		/* name is optional; skip the copy-out when NULL. */
		if (name != NULL) {
			/* This function returns name in big endian. */
			name[0] = MSB(mcp->mb[2]);
			name[1] = LSB(mcp->mb[2]);
			name[2] = MSB(mcp->mb[3]);
			name[3] = LSB(mcp->mb[3]);
			name[4] = MSB(mcp->mb[6]);
			name[5] = LSB(mcp->mb[6]);
			name[6] = MSB(mcp->mb[7]);
			name[7] = LSB(mcp->mb[7]);
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
		    "Done %s.\n", __func__);
	}

	return rval;
}
2368
2369 /*
 * qla24xx_link_initialize
2371 * Issue link initialization mailbox command.
2372 *
2373 * Input:
2374 * ha = adapter block pointer.
2375 * TARGET_QUEUE_LOCK must be released.
2376 * ADAPTER_STATE_LOCK must be released.
2377 *
2378 * Returns:
2379 * qla2x00 local function return status code.
2380 *
2381 * Context:
2382 * Kernel context.
2383 */
2384 int
qla24xx_link_initialize(scsi_qla_host_t * vha)2385 qla24xx_link_initialize(scsi_qla_host_t *vha)
2386 {
2387 int rval;
2388 mbx_cmd_t mc;
2389 mbx_cmd_t *mcp = &mc;
2390
2391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2392 "Entered %s.\n", __func__);
2393
2394 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2395 return QLA_FUNCTION_FAILED;
2396
2397 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2398 mcp->mb[1] = BIT_4;
2399 if (vha->hw->operating_mode == LOOP)
2400 mcp->mb[1] |= BIT_6;
2401 else
2402 mcp->mb[1] |= BIT_5;
2403 mcp->mb[2] = 0;
2404 mcp->mb[3] = 0;
2405 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2406 mcp->in_mb = MBX_0;
2407 mcp->tov = MBX_TOV_SECONDS;
2408 mcp->flags = 0;
2409 rval = qla2x00_mailbox_command(vha, mcp);
2410
2411 if (rval != QLA_SUCCESS) {
2412 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2413 } else {
2414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2415 "Done %s.\n", __func__);
2416 }
2417
2418 return rval;
2419 }
2420
2421 /*
2422 * qla2x00_lip_reset
2423 * Issue LIP reset mailbox command.
2424 *
2425 * Input:
2426 * ha = adapter block pointer.
2427 * TARGET_QUEUE_LOCK must be released.
2428 * ADAPTER_STATE_LOCK must be released.
2429 *
2430 * Returns:
2431 * qla2x00 local function return status code.
2432 *
2433 * Context:
2434 * Kernel context.
2435 */
int
qla2x00_lip_reset(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_disc, vha, 0x105a,
	    "Entered %s.\n", __func__);

	/* Mailbox layout differs by adapter generation. */
	if (IS_CNA_CAPABLE(vha->hw)) {
		/* Logout across all FCFs. */
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_1;
		mcp->mb[2] = 0;
		mcp->out_mb = MBX_2|MBX_1|MBX_0;
	} else if (IS_FWI2_CAPABLE(vha->hw)) {
		/* FWI2: full-login LIP with reset delay in mb[3]. */
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_4;
		mcp->mb[2] = 0;
		mcp->mb[3] = vha->hw->loop_reset_delay;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	} else {
		/* Legacy ISPs: dedicated LIP reset command; delay in mb[2]. */
		mcp->mb[0] = MBC_LIP_RESET;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		if (HAS_EXTENDED_IDS(vha->hw)) {
			mcp->mb[1] = 0x00ff;
			mcp->mb[10] = 0;
			mcp->out_mb |= MBX_10;
		} else {
			mcp->mb[1] = 0xff00;
		}
		mcp->mb[2] = vha->hw->loop_reset_delay;
		mcp->mb[3] = 0;
	}
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
		    "Done %s.\n", __func__);
	}

	return rval;
}
2487
2488 /*
2489 * qla2x00_send_sns
2490 * Send SNS command.
2491 *
2492 * Input:
2493 * ha = adapter block pointer.
2494 * sns = pointer for command.
2495 * cmd_size = command size.
2496 * buf_size = response/command size.
2497 * TARGET_QUEUE_LOCK must be released.
2498 * ADAPTER_STATE_LOCK must be released.
2499 *
2500 * Returns:
2501 * qla2x00 local function return status code.
2502 *
2503 * Context:
2504 * Kernel context.
2505 */
2506 int
qla2x00_send_sns(scsi_qla_host_t * vha,dma_addr_t sns_phys_address,uint16_t cmd_size,size_t buf_size)2507 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2508 uint16_t cmd_size, size_t buf_size)
2509 {
2510 int rval;
2511 mbx_cmd_t mc;
2512 mbx_cmd_t *mcp = &mc;
2513
2514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2515 "Entered %s.\n", __func__);
2516
2517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2518 "Retry cnt=%d ratov=%d total tov=%d.\n",
2519 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2520
2521 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2522 mcp->mb[1] = cmd_size;
2523 mcp->mb[2] = MSW(sns_phys_address);
2524 mcp->mb[3] = LSW(sns_phys_address);
2525 mcp->mb[6] = MSW(MSD(sns_phys_address));
2526 mcp->mb[7] = LSW(MSD(sns_phys_address));
2527 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2528 mcp->in_mb = MBX_0|MBX_1;
2529 mcp->buf_size = buf_size;
2530 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2531 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2532 rval = qla2x00_mailbox_command(vha, mcp);
2533
2534 if (rval != QLA_SUCCESS) {
2535 /*EMPTY*/
2536 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2537 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2538 rval, mcp->mb[0], mcp->mb[1]);
2539 } else {
2540 /*EMPTY*/
2541 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2542 "Done %s.\n", __func__);
2543 }
2544
2545 return rval;
2546 }
2547
/*
 * qla24xx_login_fabric
 *	Perform fabric login on ISP24xx+ via a LOGINOUT IOCB (PLOGI) instead
 *	of the legacy mailbox command, translating the IOCB completion into
 *	legacy MBS_* codes in @mb so callers of both paths behave the same.
 *
 * Input:
 *	vha = adapter state pointer.
 *	loop_id = N_Port handle to log in.
 *	domain/area/al_pa = destination port ID bytes.
 *	mb = out: legacy-style mailbox status words (mb[0], mb[1], mb[10]).
 *	opt = BIT_0 conditional PLOGI, BIT_1 skip PRLI.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
	int rval;

	struct logio_entry_24xx *lg;
	dma_addr_t lg_dma;
	uint32_t iop[2];
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
	    "Entered %s.\n", __func__);

	/* Virtual ports use their own queue pair when available. */
	if (vha->vp_idx && vha->qpair)
		req = vha->qpair->req;
	else
		req = ha->req_q_map[0];

	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
	if (lg == NULL) {
		ql_log(ql_log_warn, vha, 0x1062,
		    "Failed to allocate login IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	lg->entry_count = 1;
	lg->handle = make_handle(req->id, lg->handle);
	lg->nport_handle = cpu_to_le16(loop_id);
	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (opt & BIT_0)
		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (opt & BIT_1)
		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	/* IOCB port_id byte order: [0]=al_pa, [1]=area, [2]=domain. */
	lg->port_id[0] = al_pa;
	lg->port_id[1] = area;
	lg->port_id[2] = domain;
	lg->vp_index = vha->vp_idx;
	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
	    (ha->r_a_tov / 10 * 2) + 2);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1063,
		    "Failed to issue login IOCB (%x).\n", rval);
	} else if (lg->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1064,
		    "Failed to complete IOCB -- error status (%x).\n",
		    lg->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
		iop[0] = le32_to_cpu(lg->io_parameter[0]);
		iop[1] = le32_to_cpu(lg->io_parameter[1]);

		ql_dbg(ql_dbg_mbx, vha, 0x1065,
		    "Failed to complete IOCB -- completion status (%x) "
		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		    iop[0], iop[1]);

		/* Map IOCB logio status codes onto legacy mailbox codes. */
		switch (iop[0]) {
		case LSC_SCODE_PORTID_USED:
			mb[0] = MBS_PORT_ID_USED;
			mb[1] = LSW(iop[1]);
			break;
		case LSC_SCODE_NPORT_USED:
			mb[0] = MBS_LOOP_ID_USED;
			break;
		case LSC_SCODE_NOLINK:
		case LSC_SCODE_NOIOCB:
		case LSC_SCODE_NOXCB:
		case LSC_SCODE_CMD_FAILED:
		case LSC_SCODE_NOFABRIC:
		case LSC_SCODE_FW_NOT_READY:
		case LSC_SCODE_NOT_LOGGED_IN:
		case LSC_SCODE_NOPCB:
		case LSC_SCODE_ELS_REJECT:
		case LSC_SCODE_CMD_PARAM_ERR:
		case LSC_SCODE_NONPORT:
		case LSC_SCODE_LOGGED_IN:
		case LSC_SCODE_NOFLOGI_ACC:
		default:
			mb[0] = MBS_COMMAND_ERROR;
			break;
		}
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
		    "Done %s.\n", __func__);

		iop[0] = le32_to_cpu(lg->io_parameter[0]);

		mb[0] = MBS_COMMAND_COMPLETE;
		mb[1] = 0;
		if (iop[0] & BIT_4) {
			if (iop[0] & BIT_8)
				mb[1] |= BIT_1;
		} else
			mb[1] = BIT_0;

		/* Passback COS information. */
		mb[10] = 0;
		if (lg->io_parameter[7] || lg->io_parameter[8])
			mb[10] |= BIT_0;	/* Class 2. */
		if (lg->io_parameter[9] || lg->io_parameter[10])
			mb[10] |= BIT_1;	/* Class 3. */
		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
			mb[10] |= BIT_7;	/* Confirmed Completion
						 * Allowed
						 */
	}

	dma_pool_free(ha->s_dma_pool, lg, lg_dma);

	return rval;
}
2662
2663 /*
2664 * qla2x00_login_fabric
2665 * Issue login fabric port mailbox command.
2666 *
2667 * Input:
2668 * ha = adapter block pointer.
2669 * loop_id = device loop ID.
2670 * domain = device domain.
2671 * area = device area.
2672 * al_pa = device AL_PA.
2673 * status = pointer for return status.
2674 * opt = command options.
2675 * TARGET_QUEUE_LOCK must be released.
2676 * ADAPTER_STATE_LOCK must be released.
2677 *
2678 * Returns:
2679 * qla2x00 local function return status code.
2680 *
2681 * Context:
2682 * Kernel context.
2683 */
int
qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	/* Extended-ID parts take loop_id and options in separate mailboxes;
	 * legacy parts pack both into mb[1]. */
	if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = (loop_id << 8) | opt;
	}
	/* Destination port ID: domain in mb[2], area/al_pa in mb[3]. */
	mcp->mb[2] = domain;
	mcp->mb[3] = area << 8 | al_pa;

	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses.  Copied even on failure so callers can
	 * inspect the firmware completion codes. */
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[2] = mcp->mb[2];
		mb[6] = mcp->mb[6];
		mb[7] = mcp->mb[7];
		/* COS retrieved from Get-Port-Database mailbox command. */
		mb[10] = 0;
	}

	if (rval != QLA_SUCCESS) {
		/* RLU tmp code: need to change main mailbox_command function to
		 * return ok even when the mailbox completion value is not
		 * SUCCESS. The caller needs to be responsible to interpret
		 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
		    mcp->mb[0] == 0x4006)
			rval = QLA_SUCCESS;

		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1068,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
		    "Done %s.\n", __func__);
	}

	return rval;
}
2748
2749 /*
2750 * qla2x00_login_local_device
2751 * Issue login loop port mailbox command.
2752 *
2753 * Input:
2754 * ha = adapter block pointer.
2755 * loop_id = device loop ID.
2756 * opt = command options.
2757 *
2758 * Returns:
2759 * Return status code.
2760 *
2761 * Context:
2762 * Kernel context.
2763 *
2764 */
int
qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *mb_ret, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
	    "Entered %s.\n", __func__);

	/* FWI2-capable parts have no loop-port login mailbox command;
	 * delegate to the IOCB-based fabric login path. */
	if (IS_FWI2_CAPABLE(ha))
		return qla24xx_login_fabric(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb_ret, opt);

	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = opt;
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses (optional; mb_ret may be NULL). */
	if (mb_ret != NULL) {
		mb_ret[0] = mcp->mb[0];
		mb_ret[1] = mcp->mb[1];
		mb_ret[6] = mcp->mb[6];
		mb_ret[7] = mcp->mb[7];
	}

	if (rval != QLA_SUCCESS) {
		/* AV tmp code: need to change main mailbox_command function to
		 * return ok even when the mailbox completion value is not
		 * SUCCESS. The caller needs to be responsible to interpret
		 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
			rval = QLA_SUCCESS;

		ql_dbg(ql_dbg_mbx, vha, 0x106b,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
		    "Done %s.\n", __func__);
	}

	return (rval);
}
2823
/*
 * qla24xx_fabric_logout
 *	Perform fabric logout on ISP24xx+ via a LOGINOUT IOCB (explicit LOGO,
 *	freeing the N_Port handle) instead of the legacy mailbox command.
 *
 * Input:
 *	vha = adapter state pointer.
 *	loop_id = N_Port handle to log out.
 *	domain/area/al_pa = destination port ID bytes.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	int rval;
	struct logio_entry_24xx *lg;
	dma_addr_t lg_dma;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
	    "Entered %s.\n", __func__);

	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
	if (lg == NULL) {
		ql_log(ql_log_warn, vha, 0x106e,
		    "Failed to allocate logout IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	req = vha->req;
	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	lg->entry_count = 1;
	lg->handle = make_handle(req->id, lg->handle);
	lg->nport_handle = cpu_to_le16(loop_id);
	lg->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
		LCF_FREE_NPORT);
	/* IOCB port_id byte order: [0]=al_pa, [1]=area, [2]=domain. */
	lg->port_id[0] = al_pa;
	lg->port_id[1] = area;
	lg->port_id[2] = domain;
	lg->vp_index = vha->vp_idx;
	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
	    (ha->r_a_tov / 10 * 2) + 2);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x106f,
		    "Failed to issue logout IOCB (%x).\n", rval);
	} else if (lg->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1070,
		    "Failed to complete IOCB -- error status (%x).\n",
		    lg->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1071,
		    "Failed to complete IOCB -- completion status (%x) "
		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		    le32_to_cpu(lg->io_parameter[0]),
		    le32_to_cpu(lg->io_parameter[1]));
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, lg, lg_dma);

	return rval;
}
2882
2883 /*
2884 * qla2x00_fabric_logout
2885 * Issue logout fabric port mailbox command.
2886 *
2887 * Input:
2888 * ha = adapter block pointer.
2889 * loop_id = device loop ID.
2890 * TARGET_QUEUE_LOCK must be released.
2891 * ADAPTER_STATE_LOCK must be released.
2892 *
2893 * Returns:
2894 * qla2x00 local function return status code.
2895 *
2896 * Context:
2897 * Kernel context.
2898 */
2899 int
qla2x00_fabric_logout(scsi_qla_host_t * vha,uint16_t loop_id,uint8_t domain,uint8_t area,uint8_t al_pa)2900 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2901 uint8_t area, uint8_t al_pa)
2902 {
2903 int rval;
2904 mbx_cmd_t mc;
2905 mbx_cmd_t *mcp = &mc;
2906
2907 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2908 "Entered %s.\n", __func__);
2909
2910 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2911 mcp->out_mb = MBX_1|MBX_0;
2912 if (HAS_EXTENDED_IDS(vha->hw)) {
2913 mcp->mb[1] = loop_id;
2914 mcp->mb[10] = 0;
2915 mcp->out_mb |= MBX_10;
2916 } else {
2917 mcp->mb[1] = loop_id << 8;
2918 }
2919
2920 mcp->in_mb = MBX_1|MBX_0;
2921 mcp->tov = MBX_TOV_SECONDS;
2922 mcp->flags = 0;
2923 rval = qla2x00_mailbox_command(vha, mcp);
2924
2925 if (rval != QLA_SUCCESS) {
2926 /*EMPTY*/
2927 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2928 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2929 } else {
2930 /*EMPTY*/
2931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2932 "Done %s.\n", __func__);
2933 }
2934
2935 return rval;
2936 }
2937
2938 /*
2939 * qla2x00_full_login_lip
2940 * Issue full login LIP mailbox command.
2941 *
2942 * Input:
2943 * ha = adapter block pointer.
2944 * TARGET_QUEUE_LOCK must be released.
2945 * ADAPTER_STATE_LOCK must be released.
2946 *
2947 * Returns:
2948 * qla2x00 local function return status code.
2949 *
2950 * Context:
2951 * Kernel context.
2952 */
2953 int
qla2x00_full_login_lip(scsi_qla_host_t * vha)2954 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2955 {
2956 int rval;
2957 mbx_cmd_t mc;
2958 mbx_cmd_t *mcp = &mc;
2959
2960 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2961 "Entered %s.\n", __func__);
2962
2963 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2964 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2965 mcp->mb[2] = 0;
2966 mcp->mb[3] = 0;
2967 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2968 mcp->in_mb = MBX_0;
2969 mcp->tov = MBX_TOV_SECONDS;
2970 mcp->flags = 0;
2971 rval = qla2x00_mailbox_command(vha, mcp);
2972
2973 if (rval != QLA_SUCCESS) {
2974 /*EMPTY*/
2975 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2976 } else {
2977 /*EMPTY*/
2978 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2979 "Done %s.\n", __func__);
2980 }
2981
2982 return rval;
2983 }
2984
2985 /*
2986 * qla2x00_get_id_list
2987 *
2988 * Input:
2989 * ha = adapter block pointer.
2990 *
2991 * Returns:
2992 * qla2x00 local function return status code.
2993 *
2994 * Context:
2995 * Kernel context.
2996 */
int
qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
    uint16_t *entries)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
	    "Entered %s.\n", __func__);

	if (id_list == NULL)
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_ID_LIST;
	mcp->out_mb = MBX_0;
	/* DMA address mailbox layout differs between FWI2-capable parts
	 * (mb[2,3,6,7] + vp index in mb[9]) and legacy parts (mb[1,2,3,6]). */
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[2] = MSW(id_list_dma);
		mcp->mb[3] = LSW(id_list_dma);
		mcp->mb[6] = MSW(MSD(id_list_dma));
		mcp->mb[7] = LSW(MSD(id_list_dma));
		mcp->mb[8] = 0;
		mcp->mb[9] = vha->vp_idx;
		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
	} else {
		mcp->mb[1] = MSW(id_list_dma);
		mcp->mb[2] = LSW(id_list_dma);
		mcp->mb[3] = MSW(MSD(id_list_dma));
		mcp->mb[6] = LSW(MSD(id_list_dma));
		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
	}
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
	} else {
		/* Firmware reports the entry count in mb[1]. */
		*entries = mcp->mb[1];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
		    "Done %s.\n", __func__);
	}

	return rval;
}
3044
3045 /*
3046 * qla2x00_get_resource_cnts
3047 * Get current firmware resource counts.
3048 *
3049 * Input:
3050 * ha = adapter block pointer.
3051 *
3052 * Returns:
3053 * qla2x00 local function return status code.
3054 *
3055 * Context:
3056 * Kernel context.
3057 */
int
qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Newer parts additionally report a value in mb[12]. */
	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |= MBX_12;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x107d,
		    "Failed mb[0]=%x.\n", mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
		    mcp->mb[11], mcp->mb[12]);

		/* Cache firmware exchange/IOCB resource counts on ha. */
		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
		ha->cur_fw_xcb_count = mcp->mb[3];
		ha->orig_fw_xcb_count = mcp->mb[6];
		ha->cur_fw_iocb_count = mcp->mb[7];
		ha->orig_fw_iocb_count = mcp->mb[10];
		if (ha->flags.npiv_supported)
			ha->max_npiv_vports = mcp->mb[11];
		if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
			ha->fw_max_fcf_count = mcp->mb[12];
	}

	return (rval);
}
3104
3105 /*
3106 * qla2x00_get_fcal_position_map
3107 * Get FCAL (LILP) position map using mailbox command
3108 *
3109 * Input:
3110 * ha = adapter state pointer.
3111 * pos_map = buffer pointer (can be NULL).
3112 *
3113 * Returns:
3114 * qla2x00 local function return status code.
3115 *
3116 * Context:
3117 * Kernel context.
3118 */
int
qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
    u8 *num_entries)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	char *pmap;
	dma_addr_t pmap_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
	    "Entered %s.\n", __func__);

	/* Bounce buffer from the shared DMA pool; freed before return. */
	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
	if (pmap == NULL) {
		ql_log(ql_log_warn, vha, 0x1080,
		    "Memory alloc failed.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
	mcp->mb[2] = MSW(pmap_dma);
	mcp->mb[3] = LSW(pmap_dma);
	mcp->mb[6] = MSW(MSD(pmap_dma));
	mcp->mb[7] = LSW(MSD(pmap_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->buf_size = FCAL_MAP_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		/* pmap[0] holds the entry count, followed by the map. */
		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
		    pmap, pmap[0] + 1);

		/* Both output parameters are optional. */
		if (pos_map)
			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
		if (num_entries)
			*num_entries = pmap[0];
	}
	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
		    "Done %s.\n", __func__);
	}

	return rval;
}
3175
3176 /*
3177 * qla2x00_get_link_status
3178 *
3179 * Input:
3180 * ha = adapter block pointer.
3181 * loop_id = device loop ID.
3182 * ret_buf = pointer to link status return buffer.
3183 *
3184 * Returns:
3185 * 0 = success.
3186 * BIT_0 = mem alloc error.
3187 * BIT_1 = mailbox error.
3188 */
int
qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
    struct link_statistics *stats, dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *iter = (uint32_t *)stats;
	/* Only the dwords up to link_up_cnt are byte-swapped below. */
	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_LINK_STATUS;
	mcp->mb[2] = MSW(LSD(stats_dma));
	mcp->mb[3] = LSW(LSD(stats_dma));
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	/* loop_id placement varies by adapter generation. */
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[4] = 0;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = loop_id << 8;
		mcp->out_mb |= MBX_1;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1085,
			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Re-endianize - firmware data is le32. */
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
			    "Done %s.\n", __func__);
			for ( ; dwords--; iter++)
				le32_to_cpus(iter);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
	}

	return rval;
}
3247
/*
 * qla24xx_get_isp_stats
 *	Fetch ISP24xx+ link/private statistics into @stats via the
 *	MBC_GET_LINK_PRIV_STATS command, sent through qla24xx_send_mb_cmd.
 *	On success the entire structure is converted from le32 to host order.
 */
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
    dma_addr_t stats_dma, uint16_t options)
{
	int rval;
	mbx_cmd_t mc;
	/* mcp aliases mc; results are read back through it after the call. */
	mbx_cmd_t *mcp = &mc;
	uint32_t *iter = (uint32_t *)stats;
	ushort dwords = sizeof(*stats)/sizeof(*iter);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
	    "Entered %s.\n", __func__);

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
	mc.mb[2] = MSW(LSD(stats_dma));
	mc.mb[3] = LSW(LSD(stats_dma));
	mc.mb[6] = MSW(MSD(stats_dma));
	mc.mb[7] = LSW(MSD(stats_dma));
	mc.mb[8] = dwords;
	mc.mb[9] = vha->vp_idx;
	mc.mb[10] = options;

	rval = qla24xx_send_mb_cmd(vha, &mc);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1089,
			    "Failed mb[0]=%x.\n", mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
			    "Done %s.\n", __func__);
			/* Re-endianize - firmware data is le32. */
			for ( ; dwords--; iter++)
				le32_to_cpus(iter);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
	}

	return rval;
}
3292
/*
 * qla24xx_abort_command
 *	Abort the outstanding command described by @sp using an ISP24xx
 *	Abort IOCB issued on the command's owning queue pair.
 *
 * Returns qla2xxx local function return status code.
 */
int
qla24xx_abort_command(srb_t *sp)
{
	int rval;
	unsigned long flags = 0;

	struct abort_entry_24xx *abt;
	dma_addr_t abt_dma;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_qpair *qpair = sp->qpair;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
	    "Entered %s.\n", __func__);

	/* The abort must target the request queue that issued the command. */
	if (sp->qpair)
		req = sp->qpair->req;
	else
		return QLA_ERR_NO_QPAIR;

	/* Module parameter selects the interrupt-driven abort path. */
	if (ql2xasynctmfenable)
		return qla24xx_async_abort_command(sp);

	/* Locate the outstanding-command handle for @sp under the qp lock. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_ERR_NOT_FOUND;
	}

	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
	if (abt == NULL) {
		ql_log(ql_log_warn, vha, 0x108d,
		    "Failed to allocate abort IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	abt->entry_type = ABORT_IOCB_TYPE;
	abt->entry_count = 1;
	abt->handle = make_handle(req->id, abt->handle);
	abt->nport_handle = cpu_to_le16(fcport->loop_id);
	abt->handle_to_abort = make_handle(req->id, handle);
	abt->port_id[0] = fcport->d_id.b.al_pa;
	abt->port_id[1] = fcport->d_id.b.area;
	abt->port_id[2] = fcport->d_id.b.domain;
	abt->vp_index = fcport->vha->vp_idx;

	abt->req_que_no = cpu_to_le16(req->id);
	/* Need to pass original sp */
	qla_nvme_abort_set_option(abt, sp);

	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x108e,
		    "Failed to issue IOCB (%x).\n", rval);
	} else if (abt->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x108f,
		    "Failed to complete IOCB -- error status (%x).\n",
		    abt->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (abt->nport_handle != cpu_to_le16(0)) {
		/*
		 * On completion the firmware reuses nport_handle to return
		 * the completion status; 0 means success.
		 */
		ql_dbg(ql_dbg_mbx, vha, 0x1090,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(abt->nport_handle));
		if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
			rval = QLA_FUNCTION_PARAMETER_ERROR;
		else
			rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
		    "Done %s.\n", __func__);
	}
	if (rval == QLA_SUCCESS)
		qla_nvme_abort_process_comp_status(abt, sp);

	/* NVMe path: wait for the command's kref to be released. */
	qla_wait_nvme_release_cmd_kref(sp);

	dma_pool_free(ha->s_dma_pool, abt, abt_dma);

	return rval;
}
3381
/*
 * Scratch buffer for a task-management request: the same DMA buffer is
 * sent to the firmware as a TSK_MGMT IOCB and then read back as the
 * status IOCB (see __qla24xx_issue_tmf()).
 */
struct tsk_mgmt_cmd {
	union {
		struct tsk_mgmt_entry tsk;
		struct sts_entry_24xx sts;
	} p;
};
3388
/*
 * __qla24xx_issue_tmf
 *	Synchronously issue an ISP24xx task-management IOCB for @fcport.
 *	@type is a TCF_* control flag; @l is the LUN (used only for
 *	TCF_LUN_RESET); @name appears only in log messages; @tag is
 *	currently unused here.  A marker IOCB is sent afterwards; a marker
 *	failure is logged but does not affect the returned TMF status.
 *
 * Returns qla2xxx local function return status code.
 */
static int
__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
    uint64_t l, int tag)
{
	int rval, rval2;
	struct tsk_mgmt_cmd *tsk;
	struct sts_entry_24xx *sts;
	dma_addr_t tsk_dma;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct qla_qpair *qpair;

	vha = fcport->vha;
	ha = vha->hw;
	req = vha->req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
	    "Entered %s.\n", __func__);

	if (vha->vp_idx && vha->qpair) {
		/* NPIV port */
		qpair = vha->qpair;
		req = qpair->req;
	}

	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
	if (tsk == NULL) {
		ql_log(ql_log_warn, vha, 0x1093,
		    "Failed to allocate task management IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->p.tsk.entry_count = 1;
	tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	/* Timeout: twice the resource-allocation timeout (r_a_tov is in
	 * tenths of a second). */
	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->p.tsk.control_flags = cpu_to_le32(type);
	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
	if (type == TCF_LUN_RESET) {
		/* LUN is carried in FCP (big-endian) byte order. */
		int_to_scsilun(l, &tsk->p.tsk.lun);
		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
		    sizeof(tsk->p.tsk.lun));
	}

	/* The request buffer doubles as the returned status IOCB. */
	sts = &tsk->p.sts;
	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1094,
		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
	} else if (sts->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1095,
		    "Failed to complete IOCB -- error status (%x).\n",
		    sts->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1096,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(sts->comp_status));
		rval = QLA_FUNCTION_FAILED;
	} else if (le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID) {
		/* FCP response info present: byte 3 is the TM response code. */
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
			    "Ignoring inconsistent data length -- not enough "
			    "response info (%d).\n",
			    le32_to_cpu(sts->rsp_data_len));
		} else if (sts->data[3]) {
			ql_dbg(ql_dbg_mbx, vha, 0x1098,
			    "Failed to complete IOCB -- response (%x).\n",
			    sts->data[3]);
			rval = QLA_FUNCTION_FAILED;
		}
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1099,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);

	return rval;
}
3483
3484 int
qla24xx_abort_target(struct fc_port * fcport,uint64_t l,int tag)3485 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3486 {
3487 struct qla_hw_data *ha = fcport->vha->hw;
3488
3489 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3490 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3491
3492 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3493 }
3494
3495 int
qla24xx_lun_reset(struct fc_port * fcport,uint64_t l,int tag)3496 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3497 {
3498 struct qla_hw_data *ha = fcport->vha->hw;
3499
3500 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3501 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3502
3503 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3504 }
3505
3506 int
qla2x00_system_error(scsi_qla_host_t * vha)3507 qla2x00_system_error(scsi_qla_host_t *vha)
3508 {
3509 int rval;
3510 mbx_cmd_t mc;
3511 mbx_cmd_t *mcp = &mc;
3512 struct qla_hw_data *ha = vha->hw;
3513
3514 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3515 return QLA_FUNCTION_FAILED;
3516
3517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3518 "Entered %s.\n", __func__);
3519
3520 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3521 mcp->out_mb = MBX_0;
3522 mcp->in_mb = MBX_0;
3523 mcp->tov = 5;
3524 mcp->flags = 0;
3525 rval = qla2x00_mailbox_command(vha, mcp);
3526
3527 if (rval != QLA_SUCCESS) {
3528 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3529 } else {
3530 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3531 "Done %s.\n", __func__);
3532 }
3533
3534 return rval;
3535 }
3536
3537 int
qla2x00_write_serdes_word(scsi_qla_host_t * vha,uint16_t addr,uint16_t data)3538 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3539 {
3540 int rval;
3541 mbx_cmd_t mc;
3542 mbx_cmd_t *mcp = &mc;
3543
3544 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3545 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3546 return QLA_FUNCTION_FAILED;
3547
3548 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3549 "Entered %s.\n", __func__);
3550
3551 mcp->mb[0] = MBC_WRITE_SERDES;
3552 mcp->mb[1] = addr;
3553 if (IS_QLA2031(vha->hw))
3554 mcp->mb[2] = data & 0xff;
3555 else
3556 mcp->mb[2] = data;
3557
3558 mcp->mb[3] = 0;
3559 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3560 mcp->in_mb = MBX_0;
3561 mcp->tov = MBX_TOV_SECONDS;
3562 mcp->flags = 0;
3563 rval = qla2x00_mailbox_command(vha, mcp);
3564
3565 if (rval != QLA_SUCCESS) {
3566 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3567 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3568 } else {
3569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3570 "Done %s.\n", __func__);
3571 }
3572
3573 return rval;
3574 }
3575
3576 int
qla2x00_read_serdes_word(scsi_qla_host_t * vha,uint16_t addr,uint16_t * data)3577 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3578 {
3579 int rval;
3580 mbx_cmd_t mc;
3581 mbx_cmd_t *mcp = &mc;
3582
3583 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3584 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3585 return QLA_FUNCTION_FAILED;
3586
3587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3588 "Entered %s.\n", __func__);
3589
3590 mcp->mb[0] = MBC_READ_SERDES;
3591 mcp->mb[1] = addr;
3592 mcp->mb[3] = 0;
3593 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3594 mcp->in_mb = MBX_1|MBX_0;
3595 mcp->tov = MBX_TOV_SECONDS;
3596 mcp->flags = 0;
3597 rval = qla2x00_mailbox_command(vha, mcp);
3598
3599 if (IS_QLA2031(vha->hw))
3600 *data = mcp->mb[1] & 0xff;
3601 else
3602 *data = mcp->mb[1];
3603
3604 if (rval != QLA_SUCCESS) {
3605 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3606 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3607 } else {
3608 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3609 "Done %s.\n", __func__);
3610 }
3611
3612 return rval;
3613 }
3614
3615 int
qla8044_write_serdes_word(scsi_qla_host_t * vha,uint32_t addr,uint32_t data)3616 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3617 {
3618 int rval;
3619 mbx_cmd_t mc;
3620 mbx_cmd_t *mcp = &mc;
3621
3622 if (!IS_QLA8044(vha->hw))
3623 return QLA_FUNCTION_FAILED;
3624
3625 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3626 "Entered %s.\n", __func__);
3627
3628 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3629 mcp->mb[1] = HCS_WRITE_SERDES;
3630 mcp->mb[3] = LSW(addr);
3631 mcp->mb[4] = MSW(addr);
3632 mcp->mb[5] = LSW(data);
3633 mcp->mb[6] = MSW(data);
3634 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3635 mcp->in_mb = MBX_0;
3636 mcp->tov = MBX_TOV_SECONDS;
3637 mcp->flags = 0;
3638 rval = qla2x00_mailbox_command(vha, mcp);
3639
3640 if (rval != QLA_SUCCESS) {
3641 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3642 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3643 } else {
3644 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3645 "Done %s.\n", __func__);
3646 }
3647
3648 return rval;
3649 }
3650
3651 int
qla8044_read_serdes_word(scsi_qla_host_t * vha,uint32_t addr,uint32_t * data)3652 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3653 {
3654 int rval;
3655 mbx_cmd_t mc;
3656 mbx_cmd_t *mcp = &mc;
3657
3658 if (!IS_QLA8044(vha->hw))
3659 return QLA_FUNCTION_FAILED;
3660
3661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3662 "Entered %s.\n", __func__);
3663
3664 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3665 mcp->mb[1] = HCS_READ_SERDES;
3666 mcp->mb[3] = LSW(addr);
3667 mcp->mb[4] = MSW(addr);
3668 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3669 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3670 mcp->tov = MBX_TOV_SECONDS;
3671 mcp->flags = 0;
3672 rval = qla2x00_mailbox_command(vha, mcp);
3673
3674 *data = mcp->mb[2] << 16 | mcp->mb[1];
3675
3676 if (rval != QLA_SUCCESS) {
3677 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3678 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3679 } else {
3680 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3681 "Done %s.\n", __func__);
3682 }
3683
3684 return rval;
3685 }
3686
3687 /**
3688 * qla2x00_set_serdes_params() -
3689 * @vha: HA context
 * @sw_em_1g: serial link options for 1 Gb/s
 * @sw_em_2g: serial link options for 2 Gb/s
 * @sw_em_4g: serial link options for 4 Gb/s
 *
 * Returns qla2xxx local function return status code.
 */
3696 int
qla2x00_set_serdes_params(scsi_qla_host_t * vha,uint16_t sw_em_1g,uint16_t sw_em_2g,uint16_t sw_em_4g)3697 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3698 uint16_t sw_em_2g, uint16_t sw_em_4g)
3699 {
3700 int rval;
3701 mbx_cmd_t mc;
3702 mbx_cmd_t *mcp = &mc;
3703
3704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3705 "Entered %s.\n", __func__);
3706
3707 mcp->mb[0] = MBC_SERDES_PARAMS;
3708 mcp->mb[1] = BIT_0;
3709 mcp->mb[2] = sw_em_1g | BIT_15;
3710 mcp->mb[3] = sw_em_2g | BIT_15;
3711 mcp->mb[4] = sw_em_4g | BIT_15;
3712 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3713 mcp->in_mb = MBX_0;
3714 mcp->tov = MBX_TOV_SECONDS;
3715 mcp->flags = 0;
3716 rval = qla2x00_mailbox_command(vha, mcp);
3717
3718 if (rval != QLA_SUCCESS) {
3719 /*EMPTY*/
3720 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3721 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3722 } else {
3723 /*EMPTY*/
3724 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3725 "Done %s.\n", __func__);
3726 }
3727
3728 return rval;
3729 }
3730
3731 int
qla2x00_stop_firmware(scsi_qla_host_t * vha)3732 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3733 {
3734 int rval;
3735 mbx_cmd_t mc;
3736 mbx_cmd_t *mcp = &mc;
3737
3738 if (!IS_FWI2_CAPABLE(vha->hw))
3739 return QLA_FUNCTION_FAILED;
3740
3741 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3742 "Entered %s.\n", __func__);
3743
3744 mcp->mb[0] = MBC_STOP_FIRMWARE;
3745 mcp->mb[1] = 0;
3746 mcp->out_mb = MBX_1|MBX_0;
3747 mcp->in_mb = MBX_0;
3748 mcp->tov = 5;
3749 mcp->flags = 0;
3750 rval = qla2x00_mailbox_command(vha, mcp);
3751
3752 if (rval != QLA_SUCCESS) {
3753 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3754 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3755 rval = QLA_INVALID_COMMAND;
3756 } else {
3757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3758 "Done %s.\n", __func__);
3759 }
3760
3761 return rval;
3762 }
3763
3764 int
qla2x00_enable_eft_trace(scsi_qla_host_t * vha,dma_addr_t eft_dma,uint16_t buffers)3765 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3766 uint16_t buffers)
3767 {
3768 int rval;
3769 mbx_cmd_t mc;
3770 mbx_cmd_t *mcp = &mc;
3771
3772 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3773 "Entered %s.\n", __func__);
3774
3775 if (!IS_FWI2_CAPABLE(vha->hw))
3776 return QLA_FUNCTION_FAILED;
3777
3778 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3779 return QLA_FUNCTION_FAILED;
3780
3781 mcp->mb[0] = MBC_TRACE_CONTROL;
3782 mcp->mb[1] = TC_EFT_ENABLE;
3783 mcp->mb[2] = LSW(eft_dma);
3784 mcp->mb[3] = MSW(eft_dma);
3785 mcp->mb[4] = LSW(MSD(eft_dma));
3786 mcp->mb[5] = MSW(MSD(eft_dma));
3787 mcp->mb[6] = buffers;
3788 mcp->mb[7] = TC_AEN_DISABLE;
3789 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3790 mcp->in_mb = MBX_1|MBX_0;
3791 mcp->tov = MBX_TOV_SECONDS;
3792 mcp->flags = 0;
3793 rval = qla2x00_mailbox_command(vha, mcp);
3794 if (rval != QLA_SUCCESS) {
3795 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3796 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3797 rval, mcp->mb[0], mcp->mb[1]);
3798 } else {
3799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3800 "Done %s.\n", __func__);
3801 }
3802
3803 return rval;
3804 }
3805
3806 int
qla2x00_disable_eft_trace(scsi_qla_host_t * vha)3807 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3808 {
3809 int rval;
3810 mbx_cmd_t mc;
3811 mbx_cmd_t *mcp = &mc;
3812
3813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3814 "Entered %s.\n", __func__);
3815
3816 if (!IS_FWI2_CAPABLE(vha->hw))
3817 return QLA_FUNCTION_FAILED;
3818
3819 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3820 return QLA_FUNCTION_FAILED;
3821
3822 mcp->mb[0] = MBC_TRACE_CONTROL;
3823 mcp->mb[1] = TC_EFT_DISABLE;
3824 mcp->out_mb = MBX_1|MBX_0;
3825 mcp->in_mb = MBX_1|MBX_0;
3826 mcp->tov = MBX_TOV_SECONDS;
3827 mcp->flags = 0;
3828 rval = qla2x00_mailbox_command(vha, mcp);
3829 if (rval != QLA_SUCCESS) {
3830 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3831 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3832 rval, mcp->mb[0], mcp->mb[1]);
3833 } else {
3834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3835 "Done %s.\n", __func__);
3836 }
3837
3838 return rval;
3839 }
3840
3841 int
qla2x00_enable_fce_trace(scsi_qla_host_t * vha,dma_addr_t fce_dma,uint16_t buffers,uint16_t * mb,uint32_t * dwords)3842 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3843 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3844 {
3845 int rval;
3846 mbx_cmd_t mc;
3847 mbx_cmd_t *mcp = &mc;
3848
3849 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3850 "Entered %s.\n", __func__);
3851
3852 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3853 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3854 !IS_QLA28XX(vha->hw))
3855 return QLA_FUNCTION_FAILED;
3856
3857 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3858 return QLA_FUNCTION_FAILED;
3859
3860 mcp->mb[0] = MBC_TRACE_CONTROL;
3861 mcp->mb[1] = TC_FCE_ENABLE;
3862 mcp->mb[2] = LSW(fce_dma);
3863 mcp->mb[3] = MSW(fce_dma);
3864 mcp->mb[4] = LSW(MSD(fce_dma));
3865 mcp->mb[5] = MSW(MSD(fce_dma));
3866 mcp->mb[6] = buffers;
3867 mcp->mb[7] = TC_AEN_DISABLE;
3868 mcp->mb[8] = 0;
3869 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3870 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3871 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3872 MBX_1|MBX_0;
3873 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3874 mcp->tov = MBX_TOV_SECONDS;
3875 mcp->flags = 0;
3876 rval = qla2x00_mailbox_command(vha, mcp);
3877 if (rval != QLA_SUCCESS) {
3878 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3879 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3880 rval, mcp->mb[0], mcp->mb[1]);
3881 } else {
3882 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3883 "Done %s.\n", __func__);
3884
3885 if (mb)
3886 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3887 if (dwords)
3888 *dwords = buffers;
3889 }
3890
3891 return rval;
3892 }
3893
3894 int
qla2x00_disable_fce_trace(scsi_qla_host_t * vha,uint64_t * wr,uint64_t * rd)3895 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3896 {
3897 int rval;
3898 mbx_cmd_t mc;
3899 mbx_cmd_t *mcp = &mc;
3900
3901 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3902 "Entered %s.\n", __func__);
3903
3904 if (!IS_FWI2_CAPABLE(vha->hw))
3905 return QLA_FUNCTION_FAILED;
3906
3907 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3908 return QLA_FUNCTION_FAILED;
3909
3910 mcp->mb[0] = MBC_TRACE_CONTROL;
3911 mcp->mb[1] = TC_FCE_DISABLE;
3912 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3913 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3914 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3915 MBX_1|MBX_0;
3916 mcp->tov = MBX_TOV_SECONDS;
3917 mcp->flags = 0;
3918 rval = qla2x00_mailbox_command(vha, mcp);
3919 if (rval != QLA_SUCCESS) {
3920 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3921 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3922 rval, mcp->mb[0], mcp->mb[1]);
3923 } else {
3924 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3925 "Done %s.\n", __func__);
3926
3927 if (wr)
3928 *wr = (uint64_t) mcp->mb[5] << 48 |
3929 (uint64_t) mcp->mb[4] << 32 |
3930 (uint64_t) mcp->mb[3] << 16 |
3931 (uint64_t) mcp->mb[2];
3932 if (rd)
3933 *rd = (uint64_t) mcp->mb[9] << 48 |
3934 (uint64_t) mcp->mb[8] << 32 |
3935 (uint64_t) mcp->mb[7] << 16 |
3936 (uint64_t) mcp->mb[6];
3937 }
3938
3939 return rval;
3940 }
3941
3942 int
qla2x00_get_idma_speed(scsi_qla_host_t * vha,uint16_t loop_id,uint16_t * port_speed,uint16_t * mb)3943 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3944 uint16_t *port_speed, uint16_t *mb)
3945 {
3946 int rval;
3947 mbx_cmd_t mc;
3948 mbx_cmd_t *mcp = &mc;
3949
3950 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3951 "Entered %s.\n", __func__);
3952
3953 if (!IS_IIDMA_CAPABLE(vha->hw))
3954 return QLA_FUNCTION_FAILED;
3955
3956 mcp->mb[0] = MBC_PORT_PARAMS;
3957 mcp->mb[1] = loop_id;
3958 mcp->mb[2] = mcp->mb[3] = 0;
3959 mcp->mb[9] = vha->vp_idx;
3960 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3961 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3962 mcp->tov = MBX_TOV_SECONDS;
3963 mcp->flags = 0;
3964 rval = qla2x00_mailbox_command(vha, mcp);
3965
3966 /* Return mailbox statuses. */
3967 if (mb) {
3968 mb[0] = mcp->mb[0];
3969 mb[1] = mcp->mb[1];
3970 mb[3] = mcp->mb[3];
3971 }
3972
3973 if (rval != QLA_SUCCESS) {
3974 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3975 } else {
3976 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3977 "Done %s.\n", __func__);
3978 if (port_speed)
3979 *port_speed = mcp->mb[3];
3980 }
3981
3982 return rval;
3983 }
3984
3985 int
qla2x00_set_idma_speed(scsi_qla_host_t * vha,uint16_t loop_id,uint16_t port_speed,uint16_t * mb)3986 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3987 uint16_t port_speed, uint16_t *mb)
3988 {
3989 int rval;
3990 mbx_cmd_t mc;
3991 mbx_cmd_t *mcp = &mc;
3992
3993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3994 "Entered %s.\n", __func__);
3995
3996 if (!IS_IIDMA_CAPABLE(vha->hw))
3997 return QLA_FUNCTION_FAILED;
3998
3999 mcp->mb[0] = MBC_PORT_PARAMS;
4000 mcp->mb[1] = loop_id;
4001 mcp->mb[2] = BIT_0;
4002 mcp->mb[3] = port_speed & 0x3F;
4003 mcp->mb[9] = vha->vp_idx;
4004 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
4005 mcp->in_mb = MBX_3|MBX_1|MBX_0;
4006 mcp->tov = MBX_TOV_SECONDS;
4007 mcp->flags = 0;
4008 rval = qla2x00_mailbox_command(vha, mcp);
4009
4010 /* Return mailbox statuses. */
4011 if (mb) {
4012 mb[0] = mcp->mb[0];
4013 mb[1] = mcp->mb[1];
4014 mb[3] = mcp->mb[3];
4015 }
4016
4017 if (rval != QLA_SUCCESS) {
4018 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
4019 "Failed=%x.\n", rval);
4020 } else {
4021 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
4022 "Done %s.\n", __func__);
4023 }
4024
4025 return rval;
4026 }
4027
/*
 * qla24xx_report_id_acquisition
 *	Handle a Report ID Acquisition (RIDA) IOCB from the firmware:
 *	record the newly acquired port ID and topology, and kick off the
 *	appropriate discovery/session work for loop (format 0), fabric/NPIV
 *	(format 1) or N2N (format 2) reports.  This runs from response-queue
 *	processing, so heavier work is deferred to the DPC thread.
 */
void
qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
	struct vp_rpt_id_entry_24xx *rptid_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp = NULL;
	unsigned long flags;
	int found;
	port_id_t id;
	struct fc_port *fcport;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
	    "Entered %s.\n", __func__);

	/* Ignore malformed report-ID IOCBs. */
	if (rptid_entry->entry_status != 0)
		return;

	/* Assemble the acquired 24-bit port ID from the IOCB bytes. */
	id.b.domain = rptid_entry->port_id[2];
	id.b.area = rptid_entry->port_id[1];
	id.b.al_pa = rptid_entry->port_id[0];
	id.b.rsvd_1 = 0;
	ha->flags.n2n_ae = 0;

	if (rptid_entry->format == 0) {
		/* loop */
		ql_dbg(ql_dbg_async, vha, 0x10b7,
		    "Format 0 : Number of VPs setup %d, number of "
		    "VPs acquired %d.\n", rptid_entry->vp_setup,
		    rptid_entry->vp_acquired);
		ql_dbg(ql_dbg_async, vha, 0x10b8,
		    "Primary port id %02x%02x%02x.\n",
		    rptid_entry->port_id[2], rptid_entry->port_id[1],
		    rptid_entry->port_id[0]);
		ha->current_topology = ISP_CFG_NL;
		qla_update_host_map(vha, id);

	} else if (rptid_entry->format == 1) {
		/* fabric */
		ql_dbg(ql_dbg_async, vha, 0x10b9,
		    "Format 1: VP[%d] enabled - status %d - with "
		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
		    rptid_entry->vp_status,
		    rptid_entry->port_id[2], rptid_entry->port_id[1],
		    rptid_entry->port_id[0]);
		ql_dbg(ql_dbg_async, vha, 0x5075,
		    "Format 1: Remote WWPN %8phC.\n",
		    rptid_entry->u.f1.port_name);

		ql_dbg(ql_dbg_async, vha, 0x5075,
		    "Format 1: WWPN %8phC.\n",
		    vha->port_name);

		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
		case TOPO_N2N:
			ha->current_topology = ISP_CFG_N;
			/* Restart discovery for all known remote ports. */
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			list_for_each_entry(fcport, &vha->vp_fcports, list) {
				fcport->scan_state = QLA_FCPORT_SCAN;
				fcport->n2n_flag = 0;
			}
			id.b24 = 0;
			/*
			 * The side with the numerically larger WWPN assigns
			 * the AL_PAs (local 1, remote 2); otherwise wait for
			 * the peer to log in.
			 */
			if (wwn_to_u64(vha->port_name) >
			    wwn_to_u64(rptid_entry->u.f1.port_name)) {
				vha->d_id.b24 = 0;
				vha->d_id.b.al_pa = 1;
				ha->flags.n2n_bigger = 1;

				id.b.al_pa = 2;
				ql_dbg(ql_dbg_async, vha, 0x5075,
				    "Format 1: assign local id %x remote id %x\n",
				    vha->d_id.b24, id.b24);
			} else {
				ql_dbg(ql_dbg_async, vha, 0x5075,
				    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
				    rptid_entry->u.f1.port_name);
				ha->flags.n2n_bigger = 0;
			}

			fcport = qla2x00_find_fcport_by_wwpn(vha,
			    rptid_entry->u.f1.port_name, 1);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


			if (fcport) {
				/* Known peer: refresh its session state. */
				fcport->plogi_nack_done_deadline = jiffies + HZ;
				fcport->dm_login_expire = jiffies +
					QLA_N2N_WAIT_TIME * HZ;
				fcport->scan_state = QLA_FCPORT_FOUND;
				fcport->n2n_flag = 1;
				fcport->keep_nport_handle = 1;
				fcport->login_retry = vha->hw->login_retry_count;
				fcport->fc4_type = FS_FC4TYPE_FCP;
				if (vha->flags.nvme_enabled)
					fcport->fc4_type |= FS_FC4TYPE_NVME;

				if (wwn_to_u64(vha->port_name) >
				    wwn_to_u64(fcport->port_name)) {
					fcport->d_id = id;
				}

				switch (fcport->disc_state) {
				case DSC_DELETED:
					set_bit(RELOGIN_NEEDED,
					    &vha->dpc_flags);
					break;
				case DSC_DELETE_PEND:
					break;
				default:
					/* Tear down the stale session first. */
					qlt_schedule_sess_for_deletion(fcport);
					break;
				}
			} else {
				/* Unknown peer: create a session from DPC. */
				qla24xx_post_newsess_work(vha, &id,
				    rptid_entry->u.f1.port_name,
				    rptid_entry->u.f1.node_name,
				    NULL,
				    FS_FCP_IS_N2N);
			}

			/* if our portname is higher then initiate N2N login */

			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
			return;
		case TOPO_FL:
			ha->current_topology = ISP_CFG_FL;
			break;
		case TOPO_F:
			ha->current_topology = ISP_CFG_F;
			break;
		default:
			break;
		}

		ha->flags.gpsc_supported = 1;
		/*
		 * NOTE(review): this unconditionally forces ISP_CFG_F,
		 * overwriting the ISP_CFG_FL set just above for the TOPO_FL
		 * case -- verify this is intentional.
		 */
		ha->current_topology = ISP_CFG_F;
		/* buffer to buffer credit flag */
		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;

		if (rptid_entry->vp_idx == 0) {
			/* Physical (base) port. */
			if (rptid_entry->vp_status == VP_STAT_COMPL) {
				/* FA-WWN is only for physical port */
				if (qla_ini_mode_enabled(vha) &&
				    ha->flags.fawwpn_enabled &&
				    (rptid_entry->u.f1.flags &
				     BIT_6)) {
					memcpy(vha->port_name,
					    rptid_entry->u.f1.port_name,
					    WWN_SIZE);
				}

				qla_update_host_map(vha, id);
			}

			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
		} else {
			if (rptid_entry->vp_status != VP_STAT_COMPL &&
				rptid_entry->vp_status != VP_STAT_ID_CHG) {
				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
				    "Could not acquire ID for VP[%d].\n",
				    rptid_entry->vp_idx);
				return;
			}

			/* Find the vport this report belongs to. */
			found = 0;
			spin_lock_irqsave(&ha->vport_slock, flags);
			list_for_each_entry(vp, &ha->vp_list, list) {
				if (rptid_entry->vp_idx == vp->vp_idx) {
					found = 1;
					break;
				}
			}
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			if (!found)
				return;

			qla_update_host_map(vp, id);

			/*
			 * Cannot configure here as we are still sitting on the
			 * response queue. Handle it in dpc context.
			 */
			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
		}
		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	} else if (rptid_entry->format == 2) {
		ql_dbg(ql_dbg_async, vha, 0x505f,
		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
		    rptid_entry->port_id[2], rptid_entry->port_id[1],
		    rptid_entry->port_id[0]);

		ql_dbg(ql_dbg_async, vha, 0x5075,
		    "N2N: Remote WWPN %8phC.\n",
		    rptid_entry->u.f2.port_name);

		/* N2N. direct connect */
		ha->current_topology = ISP_CFG_N;
		ha->flags.rida_fmt2 = 1;
		vha->d_id.b.domain = rptid_entry->port_id[2];
		vha->d_id.b.area = rptid_entry->port_id[1];
		vha->d_id.b.al_pa = rptid_entry->port_id[0];

		ha->flags.n2n_ae = 1;
		spin_lock_irqsave(&ha->vport_slock, flags);
		qla_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		/* Mark everything stale, then revive the reported peer. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->scan_state = QLA_FCPORT_SCAN;
			fcport->n2n_flag = 0;
		}

		fcport = qla2x00_find_fcport_by_wwpn(vha,
		    rptid_entry->u.f2.port_name, 1);

		if (fcport) {
			fcport->login_retry = vha->hw->login_retry_count;
			fcport->plogi_nack_done_deadline = jiffies + HZ;
			fcport->scan_state = QLA_FCPORT_FOUND;
			fcport->keep_nport_handle = 1;
			fcport->n2n_flag = 1;
			fcport->d_id.b.domain =
				rptid_entry->u.f2.remote_nport_id[2];
			fcport->d_id.b.area =
				rptid_entry->u.f2.remote_nport_id[1];
			fcport->d_id.b.al_pa =
				rptid_entry->u.f2.remote_nport_id[0];

			/*
			 * For the case where remote port sending PRLO, FW
			 * sends up RIDA Format 2 as an indication of session
			 * loss. In other word, FW state change from PRLI
			 * complete back to PLOGI complete. Delete the
			 * session and let relogin drive the reconnect.
			 */
			if (atomic_read(&fcport->state) == FCS_ONLINE)
				qlt_schedule_sess_for_deletion(fcport);
		}
	}
}
4272
4273 /*
4274 * qla24xx_modify_vp_config
4275 * Change VP configuration for vha
4276 *
4277 * Input:
4278 * vha = adapter block pointer.
4279 *
4280 * Returns:
4281 * qla2xxx local function return status code.
4282 *
4283 * Context:
4284 * Kernel context.
4285 */
4286 int
qla24xx_modify_vp_config(scsi_qla_host_t * vha)4287 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4288 {
4289 int rval;
4290 struct vp_config_entry_24xx *vpmod;
4291 dma_addr_t vpmod_dma;
4292 struct qla_hw_data *ha = vha->hw;
4293 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4294
4295 /* This can be called by the parent */
4296
4297 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4298 "Entered %s.\n", __func__);
4299
4300 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4301 if (!vpmod) {
4302 ql_log(ql_log_warn, vha, 0x10bc,
4303 "Failed to allocate modify VP IOCB.\n");
4304 return QLA_MEMORY_ALLOC_FAILED;
4305 }
4306
4307 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4308 vpmod->entry_count = 1;
4309 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4310 vpmod->vp_count = 1;
4311 vpmod->vp_index1 = vha->vp_idx;
4312 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4313
4314 qlt_modify_vp_config(vha, vpmod);
4315
4316 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4317 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4318 vpmod->entry_count = 1;
4319
4320 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4321 if (rval != QLA_SUCCESS) {
4322 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4323 "Failed to issue VP config IOCB (%x).\n", rval);
4324 } else if (vpmod->comp_status != 0) {
4325 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4326 "Failed to complete IOCB -- error status (%x).\n",
4327 vpmod->comp_status);
4328 rval = QLA_FUNCTION_FAILED;
4329 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4330 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4331 "Failed to complete IOCB -- completion status (%x).\n",
4332 le16_to_cpu(vpmod->comp_status));
4333 rval = QLA_FUNCTION_FAILED;
4334 } else {
4335 /* EMPTY */
4336 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4337 "Done %s.\n", __func__);
4338 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4339 }
4340 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4341
4342 return rval;
4343 }
4344
4345 /*
4346 * qla2x00_send_change_request
4347 * Receive or disable RSCN request from fabric controller
4348 *
4349 * Input:
4350 * ha = adapter block pointer
4351 * format = registration format:
4352 * 0 - Reserved
4353 * 1 - Fabric detected registration
4354 * 2 - N_port detected registration
4355 * 3 - Full registration
4356 * FF - clear registration
4357 * vp_idx = Virtual port index
4358 *
4359 * Returns:
4360 * qla2x00 local function return status code.
4361 *
4362 * Context:
4363 * Kernel Context
4364 */
4365
4366 int
qla2x00_send_change_request(scsi_qla_host_t * vha,uint16_t format,uint16_t vp_idx)4367 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4368 uint16_t vp_idx)
4369 {
4370 int rval;
4371 mbx_cmd_t mc;
4372 mbx_cmd_t *mcp = &mc;
4373
4374 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4375 "Entered %s.\n", __func__);
4376
4377 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4378 mcp->mb[1] = format;
4379 mcp->mb[9] = vp_idx;
4380 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4381 mcp->in_mb = MBX_0|MBX_1;
4382 mcp->tov = MBX_TOV_SECONDS;
4383 mcp->flags = 0;
4384 rval = qla2x00_mailbox_command(vha, mcp);
4385
4386 if (rval == QLA_SUCCESS) {
4387 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4388 rval = BIT_1;
4389 }
4390 } else
4391 rval = BIT_1;
4392
4393 return rval;
4394 }
4395
/*
 * qla2x00_dump_ram
 *	Dump a region of RISC RAM into a host DMA buffer.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req_dma = DMA address of the receiving host buffer.
 *	addr = RISC RAM address to start dumping from.
 *	size = transfer size, in firmware-defined units.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
int
qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
	    "Entered %s.\n", __func__);

	/*
	 * Use the extended command when the address needs more than 16
	 * bits or the interface is FWI-2 capable; otherwise the legacy
	 * dump command takes only the low address word.
	 */
	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(addr);
		mcp->mb[10] = 0;
		mcp->out_mb = MBX_10|MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_DUMP_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	/* 64-bit destination DMA address, 16 bits per mailbox register. */
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	/* FWI-2 parts take a 32-bit size split across mb[4]/mb[5]. */
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[4] = MSW(size);
		mcp->mb[5] = LSW(size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1008,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
		    "Done %s.\n", __func__);
	}

	return rval;
}
4446 /* 84XX Support **************************************************************/
4447
/*
 * Verify Chip request and response overlay the same DMA buffer: the
 * request IOCB is sent and the response is read back from the same
 * allocation (see qla84xx_verify_chip()).
 */
struct cs84xx_mgmt_cmd {
	union {
		struct verify_chip_entry_84xx req;
		struct verify_chip_rsp_84xx rsp;
	} p;
};
4454
/*
 * qla84xx_verify_chip
 *	Issue a Verify Chip IOCB to validate (and optionally update) the
 *	CS84xx firmware, retrying once in verify-only mode on failure.
 *
 * Input:
 *	vha = adapter block pointer.
 *	status = returned status words: status[0] = completion status,
 *	    status[1] = failure code when status[0] is CS_VCS_CHIP_FAILURE.
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
{
	int rval, retry;
	struct cs84xx_mgmt_cmd *mn;
	dma_addr_t mn_dma;
	uint16_t options;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
	    "Entered %s.\n", __func__);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (mn == NULL) {
		return QLA_MEMORY_ALLOC_FAILED;
	}

	/* Force Update? */
	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
	/* Diagnostic firmware? */
	/* options |= MENLO_DIAG_FW; */
	/* We update the firmware with only one data sequence. */
	options |= VCO_END_OF_DATA;

	do {
		retry = 0;
		/* Request and response share this buffer; clear it first. */
		memset(mn, 0, sizeof(*mn));
		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
		mn->p.req.entry_count = 1;
		mn->p.req.options = cpu_to_le16(options);

		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
		    "Dump of Verify Request.\n");
		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
		    mn, sizeof(*mn));

		/* Firmware verification can be slow; allow 120 seconds. */
		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_mbx, vha, 0x10cb,
			    "Failed to issue verify IOCB (%x).\n", rval);
			goto verify_done;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
		    "Dump of Verify Response.\n");
		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
		    mn, sizeof(*mn));

		/* Failure code is only meaningful on a chip failure. */
		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
		    "cs=%x fc=%x.\n", status[0], status[1]);

		if (status[0] != CS_COMPLETE) {
			rval = QLA_FUNCTION_FAILED;
			/* Retry once with the firmware update disabled. */
			if (!(options & VCO_DONT_UPDATE_FW)) {
				ql_dbg(ql_dbg_mbx, vha, 0x10cf,
				    "Firmware update failed. Retrying "
				    "without update firmware.\n");
				options |= VCO_DONT_UPDATE_FW;
				options &= ~VCO_FORCE_UPDATE;
				retry = 1;
			}
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
			    "Firmware updated to %x.\n",
			    le32_to_cpu(mn->p.rsp.fw_ver));

			/* NOTE: we only update OP firmware. */
			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
			ha->cs84xx->op_fw_version =
			    le32_to_cpu(mn->p.rsp.fw_ver);
			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
			    flags);
		}
	} while (retry);

verify_done:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
		    "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
		    "Done %s.\n", __func__);
	}

	return rval;
}
4547
/*
 * qla25xx_init_req_que
 *	Register a request queue with the firmware via the Initialize
 *	Multiqueue mailbox command.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req = request queue to register (dma base, length, id, options...).
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int rval;
	unsigned long flags;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	/* Nothing to register if the firmware was never started. */
	if (!ha->flags.fw_started)
		return QLA_SUCCESS;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
	    "Entered %s.\n", __func__);

	/* BIT_13: enable shadow registers for the queue pointers. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		req->options |= BIT_13;

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = req->options;
	/* 64-bit queue base address, 16 bits per mailbox register. */
	mcp->mb[2] = MSW(LSD(req->dma));
	mcp->mb[3] = LSW(LSD(req->dma));
	mcp->mb[6] = MSW(MSD(req->dma));
	mcp->mb[7] = LSW(MSD(req->dma));
	mcp->mb[5] = req->length;
	if (req->rsp)
		mcp->mb[10] = req->rsp->id;
	mcp->mb[12] = req->qos;
	mcp->mb[11] = req->vp_idx;
	mcp->mb[13] = req->rid;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->mb[15] = 0;

	mcp->mb[4] = req->id;
	/* que in ptr index */
	mcp->mb[8] = 0;
	/* que out ptr index */
	mcp->mb[9] = *req->out_ptr = 0;
	/*
	 * NOTE(review): out_mb advertises MBX_14 (and MBX_10 even when
	 * req->rsp is NULL) although those registers may be left unset
	 * above -- confirm the firmware ignores them in that case.
	 */
	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS * 2;

	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha))
		mcp->in_mb |= MBX_1;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		mcp->out_mb |= MBX_15;
		/* debug q create issue in SR-IOV */
		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
	}

	/* Zero the hardware queue pointers before handing them to fw. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(req->options & BIT_0)) {
		wrt_reg_dword(req->req_q_in, 0);
		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
			wrt_reg_dword(req->req_q_out, 0);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
		    "Done %s.\n", __func__);
	}

	return rval;
}
4620
/*
 * qla25xx_init_rsp_que
 *	Register a response queue with the firmware via the Initialize
 *	Multiqueue mailbox command.
 *
 * Input:
 *	vha = adapter block pointer.
 *	rsp = response queue to register (dma base, length, id, msix...).
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int rval;
	unsigned long flags;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	/* Nothing to register if the firmware was never started. */
	if (!ha->flags.fw_started)
		return QLA_SUCCESS;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
	    "Entered %s.\n", __func__);

	/* BIT_13: enable shadow registers for the queue pointers. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		rsp->options |= BIT_13;

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = rsp->options;
	/* 64-bit queue base address, 16 bits per mailbox register. */
	mcp->mb[2] = MSW(LSD(rsp->dma));
	mcp->mb[3] = LSW(LSD(rsp->dma));
	mcp->mb[6] = MSW(MSD(rsp->dma));
	mcp->mb[7] = LSW(MSD(rsp->dma));
	mcp->mb[5] = rsp->length;
	/* MSI-X vector the firmware should post completions to. */
	mcp->mb[14] = rsp->msix->entry;
	mcp->mb[13] = rsp->rid;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->mb[15] = 0;

	mcp->mb[4] = rsp->id;
	/* que in ptr index */
	mcp->mb[8] = *rsp->in_ptr = 0;
	/* que out ptr index */
	mcp->mb[9] = 0;
	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS * 2;

	if (IS_QLA81XX(ha)) {
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
		mcp->in_mb |= MBX_1;
	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
		mcp->in_mb |= MBX_1;
		/* debug q create issue in SR-IOV */
		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
	}

	/* Zero the hardware queue pointers before handing them to fw. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(rsp->options & BIT_0)) {
		wrt_reg_dword(rsp->rsp_q_out, 0);
		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
			wrt_reg_dword(rsp->rsp_q_in, 0);
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
		    "Done %s.\n", __func__);
	}

	return rval;
}
4692
4693 int
qla81xx_idc_ack(scsi_qla_host_t * vha,uint16_t * mb)4694 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4695 {
4696 int rval;
4697 mbx_cmd_t mc;
4698 mbx_cmd_t *mcp = &mc;
4699
4700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4701 "Entered %s.\n", __func__);
4702
4703 mcp->mb[0] = MBC_IDC_ACK;
4704 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4705 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4706 mcp->in_mb = MBX_0;
4707 mcp->tov = MBX_TOV_SECONDS;
4708 mcp->flags = 0;
4709 rval = qla2x00_mailbox_command(vha, mcp);
4710
4711 if (rval != QLA_SUCCESS) {
4712 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4713 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4714 } else {
4715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4716 "Done %s.\n", __func__);
4717 }
4718
4719 return rval;
4720 }
4721
4722 int
qla81xx_fac_get_sector_size(scsi_qla_host_t * vha,uint32_t * sector_size)4723 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4724 {
4725 int rval;
4726 mbx_cmd_t mc;
4727 mbx_cmd_t *mcp = &mc;
4728
4729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4730 "Entered %s.\n", __func__);
4731
4732 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4733 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4734 return QLA_FUNCTION_FAILED;
4735
4736 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4737 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4738 mcp->out_mb = MBX_1|MBX_0;
4739 mcp->in_mb = MBX_1|MBX_0;
4740 mcp->tov = MBX_TOV_SECONDS;
4741 mcp->flags = 0;
4742 rval = qla2x00_mailbox_command(vha, mcp);
4743
4744 if (rval != QLA_SUCCESS) {
4745 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4746 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4747 rval, mcp->mb[0], mcp->mb[1]);
4748 } else {
4749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4750 "Done %s.\n", __func__);
4751 *sector_size = mcp->mb[1];
4752 }
4753
4754 return rval;
4755 }
4756
4757 int
qla81xx_fac_do_write_enable(scsi_qla_host_t * vha,int enable)4758 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4759 {
4760 int rval;
4761 mbx_cmd_t mc;
4762 mbx_cmd_t *mcp = &mc;
4763
4764 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4765 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4766 return QLA_FUNCTION_FAILED;
4767
4768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4769 "Entered %s.\n", __func__);
4770
4771 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4772 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4773 FAC_OPT_CMD_WRITE_PROTECT;
4774 mcp->out_mb = MBX_1|MBX_0;
4775 mcp->in_mb = MBX_1|MBX_0;
4776 mcp->tov = MBX_TOV_SECONDS;
4777 mcp->flags = 0;
4778 rval = qla2x00_mailbox_command(vha, mcp);
4779
4780 if (rval != QLA_SUCCESS) {
4781 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4782 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4783 rval, mcp->mb[0], mcp->mb[1]);
4784 } else {
4785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4786 "Done %s.\n", __func__);
4787 }
4788
4789 return rval;
4790 }
4791
4792 int
qla81xx_fac_erase_sector(scsi_qla_host_t * vha,uint32_t start,uint32_t finish)4793 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4794 {
4795 int rval;
4796 mbx_cmd_t mc;
4797 mbx_cmd_t *mcp = &mc;
4798
4799 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4800 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4801 return QLA_FUNCTION_FAILED;
4802
4803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4804 "Entered %s.\n", __func__);
4805
4806 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4807 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4808 mcp->mb[2] = LSW(start);
4809 mcp->mb[3] = MSW(start);
4810 mcp->mb[4] = LSW(finish);
4811 mcp->mb[5] = MSW(finish);
4812 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4813 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4814 mcp->tov = MBX_TOV_SECONDS;
4815 mcp->flags = 0;
4816 rval = qla2x00_mailbox_command(vha, mcp);
4817
4818 if (rval != QLA_SUCCESS) {
4819 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4820 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4821 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4822 } else {
4823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4824 "Done %s.\n", __func__);
4825 }
4826
4827 return rval;
4828 }
4829
4830 int
qla81xx_fac_semaphore_access(scsi_qla_host_t * vha,int lock)4831 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4832 {
4833 int rval = QLA_SUCCESS;
4834 mbx_cmd_t mc;
4835 mbx_cmd_t *mcp = &mc;
4836 struct qla_hw_data *ha = vha->hw;
4837
4838 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4839 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4840 return rval;
4841
4842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4843 "Entered %s.\n", __func__);
4844
4845 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4846 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4847 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4848 mcp->out_mb = MBX_1|MBX_0;
4849 mcp->in_mb = MBX_1|MBX_0;
4850 mcp->tov = MBX_TOV_SECONDS;
4851 mcp->flags = 0;
4852 rval = qla2x00_mailbox_command(vha, mcp);
4853
4854 if (rval != QLA_SUCCESS) {
4855 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4856 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4857 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4858 } else {
4859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4860 "Done %s.\n", __func__);
4861 }
4862
4863 return rval;
4864 }
4865
4866 int
qla81xx_restart_mpi_firmware(scsi_qla_host_t * vha)4867 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4868 {
4869 int rval = 0;
4870 mbx_cmd_t mc;
4871 mbx_cmd_t *mcp = &mc;
4872
4873 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4874 "Entered %s.\n", __func__);
4875
4876 mcp->mb[0] = MBC_RESTART_MPI_FW;
4877 mcp->out_mb = MBX_0;
4878 mcp->in_mb = MBX_0|MBX_1;
4879 mcp->tov = MBX_TOV_SECONDS;
4880 mcp->flags = 0;
4881 rval = qla2x00_mailbox_command(vha, mcp);
4882
4883 if (rval != QLA_SUCCESS) {
4884 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4885 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4886 rval, mcp->mb[0], mcp->mb[1]);
4887 } else {
4888 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4889 "Done %s.\n", __func__);
4890 }
4891
4892 return rval;
4893 }
4894
4895 int
qla82xx_set_driver_version(scsi_qla_host_t * vha,char * version)4896 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4897 {
4898 int rval;
4899 mbx_cmd_t mc;
4900 mbx_cmd_t *mcp = &mc;
4901 int i;
4902 int len;
4903 __le16 *str;
4904 struct qla_hw_data *ha = vha->hw;
4905
4906 if (!IS_P3P_TYPE(ha))
4907 return QLA_FUNCTION_FAILED;
4908
4909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4910 "Entered %s.\n", __func__);
4911
4912 str = (__force __le16 *)version;
4913 len = strlen(version);
4914
4915 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4916 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4917 mcp->out_mb = MBX_1|MBX_0;
4918 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4919 mcp->mb[i] = le16_to_cpup(str);
4920 mcp->out_mb |= 1<<i;
4921 }
4922 for (; i < 16; i++) {
4923 mcp->mb[i] = 0;
4924 mcp->out_mb |= 1<<i;
4925 }
4926 mcp->in_mb = MBX_1|MBX_0;
4927 mcp->tov = MBX_TOV_SECONDS;
4928 mcp->flags = 0;
4929 rval = qla2x00_mailbox_command(vha, mcp);
4930
4931 if (rval != QLA_SUCCESS) {
4932 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4933 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4934 } else {
4935 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4936 "Done %s.\n", __func__);
4937 }
4938
4939 return rval;
4940 }
4941
/*
 * qla25xx_set_driver_version
 *	Report the driver version string to the firmware via the Set RNID
 *	Parameters mailbox command, passing the string in a DMA buffer.
 *
 * Input:
 *	vha = adapter block pointer.
 *	version = NUL-terminated driver version string (may be truncated).
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int len;
	uint16_t dwlen;
	uint8_t *str;
	dma_addr_t str_dma;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
	    IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
	    "Entered %s.\n", __func__);

	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
	if (!str) {
		ql_log(ql_log_warn, vha, 0x117f,
		    "Failed to allocate driver version param.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	/*
	 * 4-byte parameter header; byte 0 (0x7) is the total parameter
	 * length in dwords -- presumably per the RNID set-version
	 * parameter layout, TODO confirm against the firmware spec.
	 */
	memcpy(str, "\x7\x3\x11\x0", 4);
	dwlen = str[0];
	/* Payload capacity after the header: 7 * 4 - 4 = 24 bytes. */
	len = dwlen * 4 - 4;
	memset(str + 4, 0, len);
	/* Copy at most the capacity; longer strings are truncated. */
	if (len > strlen(version))
		len = strlen(version);
	memcpy(str + 4, version, len);

	mcp->mb[0] = MBC_SET_RNID_PARAMS;
	/* mb[1]: RNID subtype in the high byte, dword length in the low. */
	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
	mcp->mb[2] = MSW(LSD(str_dma));
	mcp->mb[3] = LSW(LSD(str_dma));
	mcp->mb[6] = MSW(MSD(str_dma));
	mcp->mb[7] = LSW(MSD(str_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1180,
		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, str, str_dma);

	return rval;
}
5000
/*
 * qla24xx_get_port_login_templ
 *	Fetch the port-login payload template from the firmware into a
 *	caller-supplied DMA buffer and convert it to host byte order.
 *
 * Input:
 *	vha = adapter block pointer.
 *	buf_dma = DMA address of the receiving buffer.
 *	buf = kernel virtual address of the same buffer.
 *	bufsiz = buffer size in bytes (passed to firmware in dwords).
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
    void *buf, uint16_t bufsiz)
{
	int rval, i;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *bp;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RNID_PARAMS;
	mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
	mcp->mb[2] = MSW(buf_dma);
	mcp->mb[3] = LSW(buf_dma);
	mcp->mb[6] = MSW(MSD(buf_dma));
	mcp->mb[7] = LSW(MSD(buf_dma));
	mcp->mb[8] = bufsiz/4;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x115a,
		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
		    "Done %s.\n", __func__);
		/*
		 * Byte-swap the template in place.  Only the first
		 * (bufsiz - 4) bytes are converted -- presumably the
		 * trailing dword is not part of the template; TODO
		 * confirm against the callers.
		 */
		bp = (uint32_t *) buf;
		for (i = 0; i < (bufsiz-4)/4; i++, bp++)
			*bp = le32_to_cpu((__force __le32)*bp);
	}

	return rval;
}
5042
/* Capacity of the local cmd_opcode[] table (at most 3 slots used today). */
#define PUREX_CMD_COUNT 4
/*
 * qla25xx_set_els_cmds_supported
 *	Tell the firmware which ELS opcodes (RDP, FPIN, AUTH) should be
 *	passed up to the driver as Purex events, via a bitmap handed to
 *	the Set RNID Parameters mailbox command.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t *els_cmd_map;
	uint8_t active_cnt = 0;
	dma_addr_t els_cmd_map_dma;
	uint8_t cmd_opcode[PUREX_CMD_COUNT];
	uint8_t i, index, purex_bit;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
	    "Entered %s.\n", __func__);

	els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
	    &els_cmd_map_dma, GFP_KERNEL);
	if (!els_cmd_map) {
		ql_log(ql_log_warn, vha, 0x7101,
		    "Failed to allocate RDP els command param.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	/* List of Purex ELS */
	if (ql2xrdpenable) {
		cmd_opcode[active_cnt] = ELS_RDP;
		active_cnt++;
	}
	if (ha->flags.scm_supported_f) {
		cmd_opcode[active_cnt] = ELS_FPIN;
		active_cnt++;
	}
	if (ha->flags.edif_enabled) {
		cmd_opcode[active_cnt] = ELS_AUTH_ELS;
		active_cnt++;
	}

	/* One bit per ELS opcode: byte index = opcode / 8, bit = opcode % 8. */
	for (i = 0; i < active_cnt; i++) {
		index = cmd_opcode[i] / 8;
		purex_bit = cmd_opcode[i] % 8;
		els_cmd_map[index] |= 1 << purex_bit;
	}

	mcp->mb[0] = MBC_SET_RNID_PARAMS;
	mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
	mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
	mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
	mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
	mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT;
	mcp->buf_size = ELS_CMD_MAP_SIZE;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x118d,
		    "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
	    els_cmd_map, els_cmd_map_dma);

	return rval;
}
5118
5119 static int
qla2x00_read_asic_temperature(scsi_qla_host_t * vha,uint16_t * temp)5120 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5121 {
5122 int rval;
5123 mbx_cmd_t mc;
5124 mbx_cmd_t *mcp = &mc;
5125
5126 if (!IS_FWI2_CAPABLE(vha->hw))
5127 return QLA_FUNCTION_FAILED;
5128
5129 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5130 "Entered %s.\n", __func__);
5131
5132 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5133 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5134 mcp->out_mb = MBX_1|MBX_0;
5135 mcp->in_mb = MBX_1|MBX_0;
5136 mcp->tov = MBX_TOV_SECONDS;
5137 mcp->flags = 0;
5138 rval = qla2x00_mailbox_command(vha, mcp);
5139 *temp = mcp->mb[1];
5140
5141 if (rval != QLA_SUCCESS) {
5142 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5143 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5144 } else {
5145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5146 "Done %s.\n", __func__);
5147 }
5148
5149 return rval;
5150 }
5151
/*
 * qla2x00_read_sfp
 *	Read from the SFP transceiver over the two-wire interface.
 *
 * Input:
 *	vha = adapter block pointer.
 *	sfp_dma = DMA address of the receiving buffer.
 *	sfp = kernel buffer; for a single-byte read the byte is returned
 *	    here (taken from mb[1]) instead of via DMA.
 *	dev = two-wire device address.
 *	off = offset within the device.
 *	len = number of bytes to read.
 *	opt = command options (BIT_0 selects single-byte mode).
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
    uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	/* A one-byte transfer uses single-byte mode automatically. */
	if (len == 1)
		opt |= BIT_0;

	mcp->mb[0] = MBC_READ_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(LSD(sfp_dma));
	mcp->mb[3] = LSW(LSD(sfp_dma));
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = off;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Single-byte mode: the data byte comes back in mb[1]. */
	if (opt & BIT_0)
		*sfp = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
			/* sfp is not there */
			rval = QLA_INTERFACE_ERROR;
		}
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}
5202
/*
 * qla2x00_write_sfp
 *	Write to the SFP transceiver over the two-wire interface.
 *
 * Input:
 *	vha = adapter block pointer.
 *	sfp_dma = DMA address of the source buffer.
 *	sfp = kernel buffer; in single-byte mode (BIT_0) *sfp supplies
 *	    the byte to write (passed through mb[8]).
 *	dev = two-wire device address.
 *	off = offset within the device.
 *	len = number of bytes to write.
 *	opt = command options (BIT_0 selects single-byte mode).
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
    uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	/* A one-byte transfer uses single-byte mode automatically. */
	if (len == 1)
		opt |= BIT_0;

	/* Single-byte mode: mb[8] carries the data byte, not a length. */
	if (opt & BIT_0)
		len = *sfp;

	mcp->mb[0] = MBC_WRITE_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(LSD(sfp_dma));
	mcp->mb[3] = LSW(LSD(sfp_dma));
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = off;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
		    "Done %s.\n", __func__);
	}

	return rval;
}
5249
5250 int
qla2x00_get_xgmac_stats(scsi_qla_host_t * vha,dma_addr_t stats_dma,uint16_t size_in_bytes,uint16_t * actual_size)5251 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5252 uint16_t size_in_bytes, uint16_t *actual_size)
5253 {
5254 int rval;
5255 mbx_cmd_t mc;
5256 mbx_cmd_t *mcp = &mc;
5257
5258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5259 "Entered %s.\n", __func__);
5260
5261 if (!IS_CNA_CAPABLE(vha->hw))
5262 return QLA_FUNCTION_FAILED;
5263
5264 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5265 mcp->mb[2] = MSW(stats_dma);
5266 mcp->mb[3] = LSW(stats_dma);
5267 mcp->mb[6] = MSW(MSD(stats_dma));
5268 mcp->mb[7] = LSW(MSD(stats_dma));
5269 mcp->mb[8] = size_in_bytes >> 2;
5270 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5271 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5272 mcp->tov = MBX_TOV_SECONDS;
5273 mcp->flags = 0;
5274 rval = qla2x00_mailbox_command(vha, mcp);
5275
5276 if (rval != QLA_SUCCESS) {
5277 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5278 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5279 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5280 } else {
5281 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5282 "Done %s.\n", __func__);
5283
5284
5285 *actual_size = mcp->mb[2] << 2;
5286 }
5287
5288 return rval;
5289 }
5290
5291 int
qla2x00_get_dcbx_params(scsi_qla_host_t * vha,dma_addr_t tlv_dma,uint16_t size)5292 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5293 uint16_t size)
5294 {
5295 int rval;
5296 mbx_cmd_t mc;
5297 mbx_cmd_t *mcp = &mc;
5298
5299 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5300 "Entered %s.\n", __func__);
5301
5302 if (!IS_CNA_CAPABLE(vha->hw))
5303 return QLA_FUNCTION_FAILED;
5304
5305 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5306 mcp->mb[1] = 0;
5307 mcp->mb[2] = MSW(tlv_dma);
5308 mcp->mb[3] = LSW(tlv_dma);
5309 mcp->mb[6] = MSW(MSD(tlv_dma));
5310 mcp->mb[7] = LSW(MSD(tlv_dma));
5311 mcp->mb[8] = size;
5312 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5313 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5314 mcp->tov = MBX_TOV_SECONDS;
5315 mcp->flags = 0;
5316 rval = qla2x00_mailbox_command(vha, mcp);
5317
5318 if (rval != QLA_SUCCESS) {
5319 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5320 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5321 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5322 } else {
5323 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5324 "Done %s.\n", __func__);
5325 }
5326
5327 return rval;
5328 }
5329
5330 int
qla2x00_read_ram_word(scsi_qla_host_t * vha,uint32_t risc_addr,uint32_t * data)5331 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5332 {
5333 int rval;
5334 mbx_cmd_t mc;
5335 mbx_cmd_t *mcp = &mc;
5336
5337 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5338 "Entered %s.\n", __func__);
5339
5340 if (!IS_FWI2_CAPABLE(vha->hw))
5341 return QLA_FUNCTION_FAILED;
5342
5343 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5344 mcp->mb[1] = LSW(risc_addr);
5345 mcp->mb[8] = MSW(risc_addr);
5346 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5347 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5348 mcp->tov = MBX_TOV_SECONDS;
5349 mcp->flags = 0;
5350 rval = qla2x00_mailbox_command(vha, mcp);
5351 if (rval != QLA_SUCCESS) {
5352 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5353 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5354 } else {
5355 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5356 "Done %s.\n", __func__);
5357 *data = mcp->mb[3] << 16 | mcp->mb[2];
5358 }
5359
5360 return rval;
5361 }
5362
/*
 * qla2x00_loopback_test
 *	Run the diagnostic loopback mailbox command: transmit from the
 *	send buffer and receive into the receive buffer for the requested
 *	number of iterations.
 *
 * Input:
 *	vha = adapter block pointer.
 *	mreq = loopback request (options, DMA buffers, sizes, iterations).
 *	mresp = caller buffer receiving a copy of the mailbox registers.
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
	mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing

	/* transfer count */
	mcp->mb[10] = LSW(mreq->transfer_size);
	mcp->mb[11] = MSW(mreq->transfer_size);

	/* send data address */
	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	/* receive data address */
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	/* Iteration count */
	mcp->mb[18] = LSW(mreq->iteration_count);
	mcp->mb[19] = MSW(mreq->iteration_count);

	/* mb[2] stays 0 (memset above) but is sent on CNA adapters. */
	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->out_mb |= MBX_2;
	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->buf_size = mreq->transfer_size;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
		    "Done %s.\n", __func__);
	}

	/* Copy mailbox information */
	/* 64 bytes = the first 32 16-bit mailbox registers. */
	memcpy( mresp, mcp->mb, 64);
	return rval;
}
5424
5425 int
qla2x00_echo_test(scsi_qla_host_t * vha,struct msg_echo_lb * mreq,uint16_t * mresp)5426 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5427 uint16_t *mresp)
5428 {
5429 int rval;
5430 mbx_cmd_t mc;
5431 mbx_cmd_t *mcp = &mc;
5432 struct qla_hw_data *ha = vha->hw;
5433
5434 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5435 "Entered %s.\n", __func__);
5436
5437 memset(mcp->mb, 0 , sizeof(mcp->mb));
5438 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5439 /* BIT_6 specifies 64bit address */
5440 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5441 if (IS_CNA_CAPABLE(ha)) {
5442 mcp->mb[2] = vha->fcoe_fcf_idx;
5443 }
5444 mcp->mb[16] = LSW(mreq->rcv_dma);
5445 mcp->mb[17] = MSW(mreq->rcv_dma);
5446 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5447 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5448
5449 mcp->mb[10] = LSW(mreq->transfer_size);
5450
5451 mcp->mb[14] = LSW(mreq->send_dma);
5452 mcp->mb[15] = MSW(mreq->send_dma);
5453 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5454 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5455
5456 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5457 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5458 if (IS_CNA_CAPABLE(ha))
5459 mcp->out_mb |= MBX_2;
5460
5461 mcp->in_mb = MBX_0;
5462 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5463 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5464 mcp->in_mb |= MBX_1;
5465 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5466 IS_QLA28XX(ha))
5467 mcp->in_mb |= MBX_3;
5468
5469 mcp->tov = MBX_TOV_SECONDS;
5470 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5471 mcp->buf_size = mreq->transfer_size;
5472
5473 rval = qla2x00_mailbox_command(vha, mcp);
5474
5475 if (rval != QLA_SUCCESS) {
5476 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5477 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5478 rval, mcp->mb[0], mcp->mb[1]);
5479 } else {
5480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5481 "Done %s.\n", __func__);
5482 }
5483
5484 /* Copy mailbox information */
5485 memcpy(mresp, mcp->mb, 64);
5486 return rval;
5487 }
5488
5489 int
qla84xx_reset_chip(scsi_qla_host_t * vha,uint16_t enable_diagnostic)5490 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5491 {
5492 int rval;
5493 mbx_cmd_t mc;
5494 mbx_cmd_t *mcp = &mc;
5495
5496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5497 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5498
5499 mcp->mb[0] = MBC_ISP84XX_RESET;
5500 mcp->mb[1] = enable_diagnostic;
5501 mcp->out_mb = MBX_1|MBX_0;
5502 mcp->in_mb = MBX_1|MBX_0;
5503 mcp->tov = MBX_TOV_SECONDS;
5504 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5505 rval = qla2x00_mailbox_command(vha, mcp);
5506
5507 if (rval != QLA_SUCCESS)
5508 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5509 else
5510 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5511 "Done %s.\n", __func__);
5512
5513 return rval;
5514 }
5515
5516 int
qla2x00_write_ram_word(scsi_qla_host_t * vha,uint32_t risc_addr,uint32_t data)5517 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5518 {
5519 int rval;
5520 mbx_cmd_t mc;
5521 mbx_cmd_t *mcp = &mc;
5522
5523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5524 "Entered %s.\n", __func__);
5525
5526 if (!IS_FWI2_CAPABLE(vha->hw))
5527 return QLA_FUNCTION_FAILED;
5528
5529 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5530 mcp->mb[1] = LSW(risc_addr);
5531 mcp->mb[2] = LSW(data);
5532 mcp->mb[3] = MSW(data);
5533 mcp->mb[8] = MSW(risc_addr);
5534 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5535 mcp->in_mb = MBX_1|MBX_0;
5536 mcp->tov = MBX_TOV_SECONDS;
5537 mcp->flags = 0;
5538 rval = qla2x00_mailbox_command(vha, mcp);
5539 if (rval != QLA_SUCCESS) {
5540 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5541 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5542 rval, mcp->mb[0], mcp->mb[1]);
5543 } else {
5544 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5545 "Done %s.\n", __func__);
5546 }
5547
5548 return rval;
5549 }
5550
5551 int
qla81xx_write_mpi_register(scsi_qla_host_t * vha,uint16_t * mb)5552 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5553 {
5554 int rval;
5555 uint32_t stat, timer;
5556 uint16_t mb0 = 0;
5557 struct qla_hw_data *ha = vha->hw;
5558 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5559
5560 rval = QLA_SUCCESS;
5561
5562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5563 "Entered %s.\n", __func__);
5564
5565 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5566
5567 /* Write the MBC data to the registers */
5568 wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER);
5569 wrt_reg_word(®->mailbox1, mb[0]);
5570 wrt_reg_word(®->mailbox2, mb[1]);
5571 wrt_reg_word(®->mailbox3, mb[2]);
5572 wrt_reg_word(®->mailbox4, mb[3]);
5573
5574 wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT);
5575
5576 /* Poll for MBC interrupt */
5577 for (timer = 6000000; timer; timer--) {
5578 /* Check for pending interrupts. */
5579 stat = rd_reg_dword(®->host_status);
5580 if (stat & HSRX_RISC_INT) {
5581 stat &= 0xff;
5582
5583 if (stat == 0x1 || stat == 0x2 ||
5584 stat == 0x10 || stat == 0x11) {
5585 set_bit(MBX_INTERRUPT,
5586 &ha->mbx_cmd_flags);
5587 mb0 = rd_reg_word(®->mailbox0);
5588 wrt_reg_dword(®->hccr,
5589 HCCRX_CLR_RISC_INT);
5590 rd_reg_dword(®->hccr);
5591 break;
5592 }
5593 }
5594 udelay(5);
5595 }
5596
5597 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5598 rval = mb0 & MBS_MASK;
5599 else
5600 rval = QLA_FUNCTION_FAILED;
5601
5602 if (rval != QLA_SUCCESS) {
5603 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5604 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5605 } else {
5606 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5607 "Done %s.\n", __func__);
5608 }
5609
5610 return rval;
5611 }
5612
5613 /* Set the specified data rate */
5614 int
qla2x00_set_data_rate(scsi_qla_host_t * vha,uint16_t mode)5615 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5616 {
5617 int rval;
5618 mbx_cmd_t mc;
5619 mbx_cmd_t *mcp = &mc;
5620 struct qla_hw_data *ha = vha->hw;
5621 uint16_t val;
5622
5623 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5624 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5625 mode);
5626
5627 if (!IS_FWI2_CAPABLE(ha))
5628 return QLA_FUNCTION_FAILED;
5629
5630 memset(mcp, 0, sizeof(*mcp));
5631 switch (ha->set_data_rate) {
5632 case PORT_SPEED_AUTO:
5633 case PORT_SPEED_4GB:
5634 case PORT_SPEED_8GB:
5635 case PORT_SPEED_16GB:
5636 case PORT_SPEED_32GB:
5637 val = ha->set_data_rate;
5638 break;
5639 default:
5640 ql_log(ql_log_warn, vha, 0x1199,
5641 "Unrecognized speed setting:%d. Setting Autoneg\n",
5642 ha->set_data_rate);
5643 val = ha->set_data_rate = PORT_SPEED_AUTO;
5644 break;
5645 }
5646
5647 mcp->mb[0] = MBC_DATA_RATE;
5648 mcp->mb[1] = mode;
5649 mcp->mb[2] = val;
5650
5651 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5652 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5653 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5654 mcp->in_mb |= MBX_4|MBX_3;
5655 mcp->tov = MBX_TOV_SECONDS;
5656 mcp->flags = 0;
5657 rval = qla2x00_mailbox_command(vha, mcp);
5658 if (rval != QLA_SUCCESS) {
5659 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5660 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5661 } else {
5662 if (mcp->mb[1] != 0x7)
5663 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5664 "Speed set:0x%x\n", mcp->mb[1]);
5665
5666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5667 "Done %s.\n", __func__);
5668 }
5669
5670 return rval;
5671 }
5672
5673 int
qla2x00_get_data_rate(scsi_qla_host_t * vha)5674 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5675 {
5676 int rval;
5677 mbx_cmd_t mc;
5678 mbx_cmd_t *mcp = &mc;
5679 struct qla_hw_data *ha = vha->hw;
5680
5681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5682 "Entered %s.\n", __func__);
5683
5684 if (!IS_FWI2_CAPABLE(ha))
5685 return QLA_FUNCTION_FAILED;
5686
5687 mcp->mb[0] = MBC_DATA_RATE;
5688 mcp->mb[1] = QLA_GET_DATA_RATE;
5689 mcp->out_mb = MBX_1|MBX_0;
5690 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5691 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5692 mcp->in_mb |= MBX_4|MBX_3;
5693 mcp->tov = MBX_TOV_SECONDS;
5694 mcp->flags = 0;
5695 rval = qla2x00_mailbox_command(vha, mcp);
5696 if (rval != QLA_SUCCESS) {
5697 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5698 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5699 } else {
5700 if (mcp->mb[1] != 0x7)
5701 ha->link_data_rate = mcp->mb[1];
5702
5703 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5704 if (mcp->mb[4] & BIT_0)
5705 ql_log(ql_log_info, vha, 0x11a2,
5706 "FEC=enabled (data rate).\n");
5707 }
5708
5709 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5710 "Done %s.\n", __func__);
5711 if (mcp->mb[1] != 0x7)
5712 ha->link_data_rate = mcp->mb[1];
5713 }
5714
5715 return rval;
5716 }
5717
5718 int
qla81xx_get_port_config(scsi_qla_host_t * vha,uint16_t * mb)5719 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5720 {
5721 int rval;
5722 mbx_cmd_t mc;
5723 mbx_cmd_t *mcp = &mc;
5724 struct qla_hw_data *ha = vha->hw;
5725
5726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5727 "Entered %s.\n", __func__);
5728
5729 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5730 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5731 return QLA_FUNCTION_FAILED;
5732 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5733 mcp->out_mb = MBX_0;
5734 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5735 mcp->tov = MBX_TOV_SECONDS;
5736 mcp->flags = 0;
5737
5738 rval = qla2x00_mailbox_command(vha, mcp);
5739
5740 if (rval != QLA_SUCCESS) {
5741 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5742 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5743 } else {
5744 /* Copy all bits to preserve original value */
5745 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5746
5747 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5748 "Done %s.\n", __func__);
5749 }
5750 return rval;
5751 }
5752
5753 int
qla81xx_set_port_config(scsi_qla_host_t * vha,uint16_t * mb)5754 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5755 {
5756 int rval;
5757 mbx_cmd_t mc;
5758 mbx_cmd_t *mcp = &mc;
5759
5760 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5761 "Entered %s.\n", __func__);
5762
5763 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5764 /* Copy all bits to preserve original setting */
5765 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5766 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5767 mcp->in_mb = MBX_0;
5768 mcp->tov = MBX_TOV_SECONDS;
5769 mcp->flags = 0;
5770 rval = qla2x00_mailbox_command(vha, mcp);
5771
5772 if (rval != QLA_SUCCESS) {
5773 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5774 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5775 } else
5776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5777 "Done %s.\n", __func__);
5778
5779 return rval;
5780 }
5781
5782
5783 int
qla24xx_set_fcp_prio(scsi_qla_host_t * vha,uint16_t loop_id,uint16_t priority,uint16_t * mb)5784 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5785 uint16_t *mb)
5786 {
5787 int rval;
5788 mbx_cmd_t mc;
5789 mbx_cmd_t *mcp = &mc;
5790 struct qla_hw_data *ha = vha->hw;
5791
5792 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5793 "Entered %s.\n", __func__);
5794
5795 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5796 return QLA_FUNCTION_FAILED;
5797
5798 mcp->mb[0] = MBC_PORT_PARAMS;
5799 mcp->mb[1] = loop_id;
5800 if (ha->flags.fcp_prio_enabled)
5801 mcp->mb[2] = BIT_1;
5802 else
5803 mcp->mb[2] = BIT_2;
5804 mcp->mb[4] = priority & 0xf;
5805 mcp->mb[9] = vha->vp_idx;
5806 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5807 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5808 mcp->tov = MBX_TOV_SECONDS;
5809 mcp->flags = 0;
5810 rval = qla2x00_mailbox_command(vha, mcp);
5811 if (mb != NULL) {
5812 mb[0] = mcp->mb[0];
5813 mb[1] = mcp->mb[1];
5814 mb[3] = mcp->mb[3];
5815 mb[4] = mcp->mb[4];
5816 }
5817
5818 if (rval != QLA_SUCCESS) {
5819 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5820 } else {
5821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5822 "Done %s.\n", __func__);
5823 }
5824
5825 return rval;
5826 }
5827
5828 int
qla2x00_get_thermal_temp(scsi_qla_host_t * vha,uint16_t * temp)5829 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5830 {
5831 int rval = QLA_FUNCTION_FAILED;
5832 struct qla_hw_data *ha = vha->hw;
5833 uint8_t byte;
5834
5835 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5836 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5837 "Thermal not supported by this card.\n");
5838 return rval;
5839 }
5840
5841 if (IS_QLA25XX(ha)) {
5842 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5843 ha->pdev->subsystem_device == 0x0175) {
5844 rval = qla2x00_read_sfp(vha, 0, &byte,
5845 0x98, 0x1, 1, BIT_13|BIT_0);
5846 *temp = byte;
5847 return rval;
5848 }
5849 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5850 ha->pdev->subsystem_device == 0x338e) {
5851 rval = qla2x00_read_sfp(vha, 0, &byte,
5852 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5853 *temp = byte;
5854 return rval;
5855 }
5856 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5857 "Thermal not supported by this card.\n");
5858 return rval;
5859 }
5860
5861 if (IS_QLA82XX(ha)) {
5862 *temp = qla82xx_read_temperature(vha);
5863 rval = QLA_SUCCESS;
5864 return rval;
5865 } else if (IS_QLA8044(ha)) {
5866 *temp = qla8044_read_temperature(vha);
5867 rval = QLA_SUCCESS;
5868 return rval;
5869 }
5870
5871 rval = qla2x00_read_asic_temperature(vha, temp);
5872 return rval;
5873 }
5874
5875 int
qla82xx_mbx_intr_enable(scsi_qla_host_t * vha)5876 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5877 {
5878 int rval;
5879 struct qla_hw_data *ha = vha->hw;
5880 mbx_cmd_t mc;
5881 mbx_cmd_t *mcp = &mc;
5882
5883 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5884 "Entered %s.\n", __func__);
5885
5886 if (!IS_FWI2_CAPABLE(ha))
5887 return QLA_FUNCTION_FAILED;
5888
5889 memset(mcp, 0, sizeof(mbx_cmd_t));
5890 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5891 mcp->mb[1] = 1;
5892
5893 mcp->out_mb = MBX_1|MBX_0;
5894 mcp->in_mb = MBX_0;
5895 mcp->tov = MBX_TOV_SECONDS;
5896 mcp->flags = 0;
5897
5898 rval = qla2x00_mailbox_command(vha, mcp);
5899 if (rval != QLA_SUCCESS) {
5900 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5901 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5902 } else {
5903 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5904 "Done %s.\n", __func__);
5905 }
5906
5907 return rval;
5908 }
5909
5910 int
qla82xx_mbx_intr_disable(scsi_qla_host_t * vha)5911 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5912 {
5913 int rval;
5914 struct qla_hw_data *ha = vha->hw;
5915 mbx_cmd_t mc;
5916 mbx_cmd_t *mcp = &mc;
5917
5918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5919 "Entered %s.\n", __func__);
5920
5921 if (!IS_P3P_TYPE(ha))
5922 return QLA_FUNCTION_FAILED;
5923
5924 memset(mcp, 0, sizeof(mbx_cmd_t));
5925 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5926 mcp->mb[1] = 0;
5927
5928 mcp->out_mb = MBX_1|MBX_0;
5929 mcp->in_mb = MBX_0;
5930 mcp->tov = MBX_TOV_SECONDS;
5931 mcp->flags = 0;
5932
5933 rval = qla2x00_mailbox_command(vha, mcp);
5934 if (rval != QLA_SUCCESS) {
5935 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5936 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5937 } else {
5938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5939 "Done %s.\n", __func__);
5940 }
5941
5942 return rval;
5943 }
5944
5945 int
qla82xx_md_get_template_size(scsi_qla_host_t * vha)5946 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5947 {
5948 struct qla_hw_data *ha = vha->hw;
5949 mbx_cmd_t mc;
5950 mbx_cmd_t *mcp = &mc;
5951 int rval = QLA_FUNCTION_FAILED;
5952
5953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5954 "Entered %s.\n", __func__);
5955
5956 memset(mcp->mb, 0 , sizeof(mcp->mb));
5957 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5958 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5959 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5960 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5961
5962 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5963 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5964 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5965
5966 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5967 mcp->tov = MBX_TOV_SECONDS;
5968 rval = qla2x00_mailbox_command(vha, mcp);
5969
5970 /* Always copy back return mailbox values. */
5971 if (rval != QLA_SUCCESS) {
5972 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5973 "mailbox command FAILED=0x%x, subcode=%x.\n",
5974 (mcp->mb[1] << 16) | mcp->mb[0],
5975 (mcp->mb[3] << 16) | mcp->mb[2]);
5976 } else {
5977 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5978 "Done %s.\n", __func__);
5979 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5980 if (!ha->md_template_size) {
5981 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5982 "Null template size obtained.\n");
5983 rval = QLA_FUNCTION_FAILED;
5984 }
5985 }
5986 return rval;
5987 }
5988
/*
 * Fetch the ISP82xx minidump template from firmware into a freshly
 * allocated coherent DMA buffer (ha->md_tmplt_hdr, sized by the earlier
 * qla82xx_md_get_template_size() call).  Returns QLA_SUCCESS or a
 * failure code; on allocation failure the buffer pointer stays NULL.
 */
int
qla82xx_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
	    "Entered %s.\n", __func__);

	/* Buffer the firmware DMA-writes the template into. */
	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0x1124,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	/* Opcode split across mb0/mb1, request subcode across mb2/mb3. */
	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[2] = LSW(RQST_TMPLT);
	mcp->mb[3] = MSW(RQST_TMPLT);
	/* 64-bit DMA address in mb4..mb7 (low to high 16-bit words). */
	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
	/* Expected template byte count in mb8/mb9. */
	mcp->mb[8] = LSW(ha->md_template_size);
	mcp->mb[9] = MSW(ha->md_template_size);

	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/* NOTE(review): ha->md_tmplt_hdr is left allocated on
		 * failure — presumably freed by the caller/teardown path;
		 * confirm against the driver's unload code. */
		ql_dbg(ql_dbg_mbx, vha, 0x1125,
		    "mailbox command FAILED=0x%x, subcode=%x.\n",
		    ((mcp->mb[1] << 16) | mcp->mb[0]),
		    ((mcp->mb[3] << 16) | mcp->mb[2]));
	} else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
		    "Done %s.\n", __func__);
	return rval;
}
6037
/*
 * Fetch the ISP8044 minidump template from firmware.  Unlike the 82xx
 * variant, the template is pulled in fixed 36K chunks, advancing the
 * DMA address and offset each iteration until the whole template
 * (ha->md_template_size bytes) has been transferred.
 */
int
qla8044_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;
	int offset = 0, size = MINIDUMP_SIZE_36K;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
	    "Entered %s.\n", __func__);

	/* One coherent buffer for the full template; chunks land at
	 * increasing offsets within it. */
	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0xb11b,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	while (offset < ha->md_template_size) {
		/* Opcode split across mb0/mb1, subcode across mb2/mb3. */
		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
		mcp->mb[2] = LSW(RQST_TMPLT);
		mcp->mb[3] = MSW(RQST_TMPLT);
		/* 64-bit destination DMA address for this chunk, mb4..mb7. */
		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[8] = LSW(size);
		mcp->mb[9] = MSW(size);
		mcp->mb[10] = offset & 0x0000FFFF;
		/* NOTE(review): mb[] is 16-bit, so "offset & 0xFFFF0000"
		 * truncates to 0 here — the high word of the offset is never
		 * sent.  Looks like MSW(offset) was intended; harmless only
		 * while templates stay under 64K.  Confirm before changing. */
		mcp->mb[11] = offset & 0xFFFF0000;
		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
		mcp->tov = MBX_TOV_SECONDS;
		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		rval = qla2x00_mailbox_command(vha, mcp);

		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
				"mailbox command FAILED=0x%x, subcode=%x.\n",
				((mcp->mb[1] << 16) | mcp->mb[0]),
				((mcp->mb[3] << 16) | mcp->mb[2]));
			return rval;
		} else
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
				"Done %s.\n", __func__);
		offset = offset + size;
	}
	return rval;
}
6092
6093 int
qla81xx_set_led_config(scsi_qla_host_t * vha,uint16_t * led_cfg)6094 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6095 {
6096 int rval;
6097 struct qla_hw_data *ha = vha->hw;
6098 mbx_cmd_t mc;
6099 mbx_cmd_t *mcp = &mc;
6100
6101 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6102 return QLA_FUNCTION_FAILED;
6103
6104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6105 "Entered %s.\n", __func__);
6106
6107 memset(mcp, 0, sizeof(mbx_cmd_t));
6108 mcp->mb[0] = MBC_SET_LED_CONFIG;
6109 mcp->mb[1] = led_cfg[0];
6110 mcp->mb[2] = led_cfg[1];
6111 if (IS_QLA8031(ha)) {
6112 mcp->mb[3] = led_cfg[2];
6113 mcp->mb[4] = led_cfg[3];
6114 mcp->mb[5] = led_cfg[4];
6115 mcp->mb[6] = led_cfg[5];
6116 }
6117
6118 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6119 if (IS_QLA8031(ha))
6120 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6121 mcp->in_mb = MBX_0;
6122 mcp->tov = MBX_TOV_SECONDS;
6123 mcp->flags = 0;
6124
6125 rval = qla2x00_mailbox_command(vha, mcp);
6126 if (rval != QLA_SUCCESS) {
6127 ql_dbg(ql_dbg_mbx, vha, 0x1134,
6128 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6129 } else {
6130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6131 "Done %s.\n", __func__);
6132 }
6133
6134 return rval;
6135 }
6136
6137 int
qla81xx_get_led_config(scsi_qla_host_t * vha,uint16_t * led_cfg)6138 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6139 {
6140 int rval;
6141 struct qla_hw_data *ha = vha->hw;
6142 mbx_cmd_t mc;
6143 mbx_cmd_t *mcp = &mc;
6144
6145 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6146 return QLA_FUNCTION_FAILED;
6147
6148 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6149 "Entered %s.\n", __func__);
6150
6151 memset(mcp, 0, sizeof(mbx_cmd_t));
6152 mcp->mb[0] = MBC_GET_LED_CONFIG;
6153
6154 mcp->out_mb = MBX_0;
6155 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6156 if (IS_QLA8031(ha))
6157 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6158 mcp->tov = MBX_TOV_SECONDS;
6159 mcp->flags = 0;
6160
6161 rval = qla2x00_mailbox_command(vha, mcp);
6162 if (rval != QLA_SUCCESS) {
6163 ql_dbg(ql_dbg_mbx, vha, 0x1137,
6164 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6165 } else {
6166 led_cfg[0] = mcp->mb[1];
6167 led_cfg[1] = mcp->mb[2];
6168 if (IS_QLA8031(ha)) {
6169 led_cfg[2] = mcp->mb[3];
6170 led_cfg[3] = mcp->mb[4];
6171 led_cfg[4] = mcp->mb[5];
6172 led_cfg[5] = mcp->mb[6];
6173 }
6174 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6175 "Done %s.\n", __func__);
6176 }
6177
6178 return rval;
6179 }
6180
6181 int
qla82xx_mbx_beacon_ctl(scsi_qla_host_t * vha,int enable)6182 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6183 {
6184 int rval;
6185 struct qla_hw_data *ha = vha->hw;
6186 mbx_cmd_t mc;
6187 mbx_cmd_t *mcp = &mc;
6188
6189 if (!IS_P3P_TYPE(ha))
6190 return QLA_FUNCTION_FAILED;
6191
6192 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6193 "Entered %s.\n", __func__);
6194
6195 memset(mcp, 0, sizeof(mbx_cmd_t));
6196 mcp->mb[0] = MBC_SET_LED_CONFIG;
6197 if (enable)
6198 mcp->mb[7] = 0xE;
6199 else
6200 mcp->mb[7] = 0xD;
6201
6202 mcp->out_mb = MBX_7|MBX_0;
6203 mcp->in_mb = MBX_0;
6204 mcp->tov = MBX_TOV_SECONDS;
6205 mcp->flags = 0;
6206
6207 rval = qla2x00_mailbox_command(vha, mcp);
6208 if (rval != QLA_SUCCESS) {
6209 ql_dbg(ql_dbg_mbx, vha, 0x1128,
6210 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6211 } else {
6212 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6213 "Done %s.\n", __func__);
6214 }
6215
6216 return rval;
6217 }
6218
6219 int
qla83xx_wr_reg(scsi_qla_host_t * vha,uint32_t reg,uint32_t data)6220 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6221 {
6222 int rval;
6223 struct qla_hw_data *ha = vha->hw;
6224 mbx_cmd_t mc;
6225 mbx_cmd_t *mcp = &mc;
6226
6227 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6228 return QLA_FUNCTION_FAILED;
6229
6230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6231 "Entered %s.\n", __func__);
6232
6233 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6234 mcp->mb[1] = LSW(reg);
6235 mcp->mb[2] = MSW(reg);
6236 mcp->mb[3] = LSW(data);
6237 mcp->mb[4] = MSW(data);
6238 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6239
6240 mcp->in_mb = MBX_1|MBX_0;
6241 mcp->tov = MBX_TOV_SECONDS;
6242 mcp->flags = 0;
6243 rval = qla2x00_mailbox_command(vha, mcp);
6244
6245 if (rval != QLA_SUCCESS) {
6246 ql_dbg(ql_dbg_mbx, vha, 0x1131,
6247 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6248 } else {
6249 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6250 "Done %s.\n", __func__);
6251 }
6252
6253 return rval;
6254 }
6255
6256 int
qla2x00_port_logout(scsi_qla_host_t * vha,struct fc_port * fcport)6257 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6258 {
6259 int rval;
6260 struct qla_hw_data *ha = vha->hw;
6261 mbx_cmd_t mc;
6262 mbx_cmd_t *mcp = &mc;
6263
6264 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6265 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6266 "Implicit LOGO Unsupported.\n");
6267 return QLA_FUNCTION_FAILED;
6268 }
6269
6270
6271 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6272 "Entering %s.\n", __func__);
6273
6274 /* Perform Implicit LOGO. */
6275 mcp->mb[0] = MBC_PORT_LOGOUT;
6276 mcp->mb[1] = fcport->loop_id;
6277 mcp->mb[10] = BIT_15;
6278 mcp->out_mb = MBX_10|MBX_1|MBX_0;
6279 mcp->in_mb = MBX_0;
6280 mcp->tov = MBX_TOV_SECONDS;
6281 mcp->flags = 0;
6282 rval = qla2x00_mailbox_command(vha, mcp);
6283 if (rval != QLA_SUCCESS)
6284 ql_dbg(ql_dbg_mbx, vha, 0x113d,
6285 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6286 else
6287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6288 "Done %s.\n", __func__);
6289
6290 return rval;
6291 }
6292
/*
 * Read a remote register via mailbox command, retrying for up to ~2
 * seconds while the value reads back as QLA8XXX_BAD_VALUE (CAMRAM
 * registers return 0xbad0bad0 during soft reset).  On success the
 * 32-bit value is stored in *data.
 */
int
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	/* Retry deadline: 2 seconds from entry. */
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

retry_rd_reg:
	/* Register address split low/high across mb1/mb2. */
	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/* Value returned split across mb3 (low)/mb4 (high). */
		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
		if (*data == QLA8XXX_BAD_VALUE) {
			/*
			 * During soft-reset CAMRAM register reads might
			 * return 0xbad0bad0. So retry for MAX of 2 sec
			 * while reading camram registers.
			 */
			if (time_after(jiffies, retry_max_time)) {
				ql_dbg(ql_dbg_mbx, vha, 0x1141,
				    "Failure to read CAMRAM register. "
				    "data=0x%x.\n", *data);
				return QLA_FUNCTION_FAILED;
			}
			msleep(100);
			goto retry_rd_reg;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
	}

	return rval;
}
6343
6344 int
qla83xx_restart_nic_firmware(scsi_qla_host_t * vha)6345 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6346 {
6347 int rval;
6348 mbx_cmd_t mc;
6349 mbx_cmd_t *mcp = &mc;
6350 struct qla_hw_data *ha = vha->hw;
6351
6352 if (!IS_QLA83XX(ha))
6353 return QLA_FUNCTION_FAILED;
6354
6355 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6356
6357 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6358 mcp->out_mb = MBX_0;
6359 mcp->in_mb = MBX_1|MBX_0;
6360 mcp->tov = MBX_TOV_SECONDS;
6361 mcp->flags = 0;
6362 rval = qla2x00_mailbox_command(vha, mcp);
6363
6364 if (rval != QLA_SUCCESS) {
6365 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6366 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6367 rval, mcp->mb[0], mcp->mb[1]);
6368 qla2xxx_dump_fw(vha);
6369 } else {
6370 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6371 }
6372
6373 return rval;
6374 }
6375
/*
 * Issue the 8031 access-control mailbox command.  The low byte of
 * "options" is a subcode whose bits select the sub-operation; the
 * address range (start_addr/end_addr) is only sent when BIT_2 is set,
 * and *sector_size is only written back when BIT_5 is set.
 */
int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
    uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	/* Sub-operation selector: low byte of the options word. */
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		/* Address range, each split low/high across two mailboxes. */
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		qla2xxx_dump_fw(vha);
	} else {
		/* Result interpretation depends on which subcode ran. */
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}
6429
6430 int
qla2x00_dump_mctp_data(scsi_qla_host_t * vha,dma_addr_t req_dma,uint32_t addr,uint32_t size)6431 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6432 uint32_t size)
6433 {
6434 int rval;
6435 mbx_cmd_t mc;
6436 mbx_cmd_t *mcp = &mc;
6437
6438 if (!IS_MCTP_CAPABLE(vha->hw))
6439 return QLA_FUNCTION_FAILED;
6440
6441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6442 "Entered %s.\n", __func__);
6443
6444 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6445 mcp->mb[1] = LSW(addr);
6446 mcp->mb[2] = MSW(req_dma);
6447 mcp->mb[3] = LSW(req_dma);
6448 mcp->mb[4] = MSW(size);
6449 mcp->mb[5] = LSW(size);
6450 mcp->mb[6] = MSW(MSD(req_dma));
6451 mcp->mb[7] = LSW(MSD(req_dma));
6452 mcp->mb[8] = MSW(addr);
6453 /* Setting RAM ID to valid */
6454 /* For MCTP RAM ID is 0x40 */
6455 mcp->mb[10] = BIT_7 | 0x40;
6456
6457 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6458 MBX_0;
6459
6460 mcp->in_mb = MBX_0;
6461 mcp->tov = MBX_TOV_SECONDS;
6462 mcp->flags = 0;
6463 rval = qla2x00_mailbox_command(vha, mcp);
6464
6465 if (rval != QLA_SUCCESS) {
6466 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6467 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6468 } else {
6469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6470 "Done %s.\n", __func__);
6471 }
6472
6473 return rval;
6474 }
6475
6476 int
qla26xx_dport_diagnostics(scsi_qla_host_t * vha,void * dd_buf,uint size,uint options)6477 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6478 void *dd_buf, uint size, uint options)
6479 {
6480 int rval;
6481 mbx_cmd_t mc;
6482 mbx_cmd_t *mcp = &mc;
6483 dma_addr_t dd_dma;
6484
6485 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6486 !IS_QLA28XX(vha->hw))
6487 return QLA_FUNCTION_FAILED;
6488
6489 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6490 "Entered %s.\n", __func__);
6491
6492 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6493 dd_buf, size, DMA_FROM_DEVICE);
6494 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6495 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6496 return QLA_MEMORY_ALLOC_FAILED;
6497 }
6498
6499 memset(dd_buf, 0, size);
6500
6501 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6502 mcp->mb[1] = options;
6503 mcp->mb[2] = MSW(LSD(dd_dma));
6504 mcp->mb[3] = LSW(LSD(dd_dma));
6505 mcp->mb[6] = MSW(MSD(dd_dma));
6506 mcp->mb[7] = LSW(MSD(dd_dma));
6507 mcp->mb[8] = size;
6508 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6509 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6510 mcp->buf_size = size;
6511 mcp->flags = MBX_DMA_IN;
6512 mcp->tov = MBX_TOV_SECONDS * 4;
6513 rval = qla2x00_mailbox_command(vha, mcp);
6514
6515 if (rval != QLA_SUCCESS) {
6516 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6517 } else {
6518 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6519 "Done %s.\n", __func__);
6520 }
6521
6522 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6523 size, DMA_FROM_DEVICE);
6524
6525 return rval;
6526 }
6527
6528 int
qla26xx_dport_diagnostics_v2(scsi_qla_host_t * vha,struct qla_dport_diag_v2 * dd,mbx_cmd_t * mcp)6529 qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
6530 struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
6531 {
6532 int rval;
6533 dma_addr_t dd_dma;
6534 uint size = sizeof(dd->buf);
6535 uint16_t options = dd->options;
6536
6537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6538 "Entered %s.\n", __func__);
6539
6540 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6541 dd->buf, size, DMA_FROM_DEVICE);
6542 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6543 ql_log(ql_log_warn, vha, 0x1194,
6544 "Failed to map dma buffer.\n");
6545 return QLA_MEMORY_ALLOC_FAILED;
6546 }
6547
6548 memset(dd->buf, 0, size);
6549
6550 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6551 mcp->mb[1] = options;
6552 mcp->mb[2] = MSW(LSD(dd_dma));
6553 mcp->mb[3] = LSW(LSD(dd_dma));
6554 mcp->mb[6] = MSW(MSD(dd_dma));
6555 mcp->mb[7] = LSW(MSD(dd_dma));
6556 mcp->mb[8] = size;
6557 mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
6558 mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
6559 mcp->buf_size = size;
6560 mcp->flags = MBX_DMA_IN;
6561 mcp->tov = MBX_TOV_SECONDS * 4;
6562 rval = qla2x00_mailbox_command(vha, mcp);
6563
6564 if (rval != QLA_SUCCESS) {
6565 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6566 } else {
6567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6568 "Done %s.\n", __func__);
6569 }
6570
6571 dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
6572
6573 return rval;
6574 }
6575
qla2x00_async_mb_sp_done(srb_t * sp,int res)6576 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6577 {
6578 sp->u.iocb_cmd.u.mbx.rc = res;
6579
6580 complete(&sp->u.iocb_cmd.u.mbx.comp);
6581 /* don't free sp here. Let the caller do the free */
6582 }
6583
6584 /*
6585 * This mailbox uses the iocb interface to send MB command.
6586 * This allows non-critial (non chip setup) command to go
6587 * out in parrallel.
6588 */
qla24xx_send_mb_cmd(struct scsi_qla_host * vha,mbx_cmd_t * mcp)6589 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6590 {
6591 int rval = QLA_FUNCTION_FAILED;
6592 srb_t *sp;
6593 struct srb_iocb *c;
6594
6595 if (!vha->hw->flags.fw_started)
6596 goto done;
6597
6598 /* ref: INIT */
6599 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6600 if (!sp)
6601 goto done;
6602
6603 c = &sp->u.iocb_cmd;
6604 init_completion(&c->u.mbx.comp);
6605
6606 sp->type = SRB_MB_IOCB;
6607 sp->name = mb_to_str(mcp->mb[0]);
6608 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
6609 qla2x00_async_mb_sp_done);
6610
6611 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6612
6613 rval = qla2x00_start_sp(sp);
6614 if (rval != QLA_SUCCESS) {
6615 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6616 "%s: %s Failed submission. %x.\n",
6617 __func__, sp->name, rval);
6618 goto done_free_sp;
6619 }
6620
6621 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6622 sp->name, sp->handle);
6623
6624 wait_for_completion(&c->u.mbx.comp);
6625 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6626
6627 rval = c->u.mbx.rc;
6628 switch (rval) {
6629 case QLA_FUNCTION_TIMEOUT:
6630 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6631 __func__, sp->name, rval);
6632 break;
6633 case QLA_SUCCESS:
6634 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6635 __func__, sp->name);
6636 break;
6637 default:
6638 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6639 __func__, sp->name, rval);
6640 break;
6641 }
6642
6643 done_free_sp:
6644 /* ref: INIT */
6645 kref_put(&sp->cmd_kref, qla2x00_sp_release);
6646 done:
6647 return rval;
6648 }
6649
qla24xx_print_fc_port_id(struct scsi_qla_host * vha,struct seq_file * s,u16 loop_id)6650 int qla24xx_print_fc_port_id(struct scsi_qla_host *vha, struct seq_file *s, u16 loop_id)
6651 {
6652 int rval = QLA_FUNCTION_FAILED;
6653 dma_addr_t pd_dma;
6654 struct port_database_24xx *pd;
6655 struct qla_hw_data *ha = vha->hw;
6656 mbx_cmd_t mc;
6657
6658 if (!vha->hw->flags.fw_started)
6659 goto done;
6660
6661 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6662 if (pd == NULL) {
6663 ql_log(ql_log_warn, vha, 0xd047,
6664 "Failed to allocate port database structure.\n");
6665 goto done;
6666 }
6667
6668 memset(&mc, 0, sizeof(mc));
6669 mc.mb[0] = MBC_GET_PORT_DATABASE;
6670 mc.mb[1] = loop_id;
6671 mc.mb[2] = MSW(pd_dma);
6672 mc.mb[3] = LSW(pd_dma);
6673 mc.mb[6] = MSW(MSD(pd_dma));
6674 mc.mb[7] = LSW(MSD(pd_dma));
6675 mc.mb[9] = vha->vp_idx;
6676
6677 rval = qla24xx_send_mb_cmd(vha, &mc);
6678 if (rval != QLA_SUCCESS) {
6679 ql_dbg(ql_dbg_mbx, vha, 0x1193, "%s: fail\n", __func__);
6680 goto done_free_sp;
6681 }
6682
6683 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6684 __func__, pd->port_name);
6685
6686 seq_printf(s, "%8phC %02x%02x%02x %d\n",
6687 pd->port_name, pd->port_id[0],
6688 pd->port_id[1], pd->port_id[2],
6689 loop_id);
6690
6691 done_free_sp:
6692 if (pd)
6693 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6694 done:
6695 return rval;
6696 }
6697
6698 /*
6699 * qla24xx_gpdb_wait
6700 * NOTE: Do not call this routine from DPC thread
6701 */
qla24xx_gpdb_wait(struct scsi_qla_host * vha,fc_port_t * fcport,u8 opt)6702 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6703 {
6704 int rval = QLA_FUNCTION_FAILED;
6705 dma_addr_t pd_dma;
6706 struct port_database_24xx *pd;
6707 struct qla_hw_data *ha = vha->hw;
6708 mbx_cmd_t mc;
6709
6710 if (!vha->hw->flags.fw_started)
6711 goto done;
6712
6713 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6714 if (pd == NULL) {
6715 ql_log(ql_log_warn, vha, 0xd047,
6716 "Failed to allocate port database structure.\n");
6717 goto done_free_sp;
6718 }
6719
6720 memset(&mc, 0, sizeof(mc));
6721 mc.mb[0] = MBC_GET_PORT_DATABASE;
6722 mc.mb[1] = fcport->loop_id;
6723 mc.mb[2] = MSW(pd_dma);
6724 mc.mb[3] = LSW(pd_dma);
6725 mc.mb[6] = MSW(MSD(pd_dma));
6726 mc.mb[7] = LSW(MSD(pd_dma));
6727 mc.mb[9] = vha->vp_idx;
6728 mc.mb[10] = opt;
6729
6730 rval = qla24xx_send_mb_cmd(vha, &mc);
6731 if (rval != QLA_SUCCESS) {
6732 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6733 "%s: %8phC fail\n", __func__, fcport->port_name);
6734 goto done_free_sp;
6735 }
6736
6737 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6738
6739 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6740 __func__, fcport->port_name);
6741
6742 done_free_sp:
6743 if (pd)
6744 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6745 done:
6746 return rval;
6747 }
6748
__qla24xx_parse_gpdb(struct scsi_qla_host * vha,fc_port_t * fcport,struct port_database_24xx * pd)6749 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6750 struct port_database_24xx *pd)
6751 {
6752 int rval = QLA_SUCCESS;
6753 uint64_t zero = 0;
6754 u8 current_login_state, last_login_state;
6755
6756 if (NVME_TARGET(vha->hw, fcport)) {
6757 current_login_state = pd->current_login_state >> 4;
6758 last_login_state = pd->last_login_state >> 4;
6759 } else {
6760 current_login_state = pd->current_login_state & 0xf;
6761 last_login_state = pd->last_login_state & 0xf;
6762 }
6763
6764 /* Check for logged in state. */
6765 if (current_login_state != PDS_PRLI_COMPLETE) {
6766 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6767 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6768 current_login_state, last_login_state, fcport->loop_id);
6769 rval = QLA_FUNCTION_FAILED;
6770 goto gpd_error_out;
6771 }
6772
6773 if (fcport->loop_id == FC_NO_LOOP_ID ||
6774 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6775 memcmp(fcport->port_name, pd->port_name, 8))) {
6776 /* We lost the device mid way. */
6777 rval = QLA_NOT_LOGGED_IN;
6778 goto gpd_error_out;
6779 }
6780
6781 /* Names are little-endian. */
6782 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6783 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6784
6785 /* Get port_id of device. */
6786 fcport->d_id.b.domain = pd->port_id[0];
6787 fcport->d_id.b.area = pd->port_id[1];
6788 fcport->d_id.b.al_pa = pd->port_id[2];
6789 fcport->d_id.b.rsvd_1 = 0;
6790
6791 ql_dbg(ql_dbg_disc, vha, 0x2062,
6792 "%8phC SVC Param w3 %02x%02x",
6793 fcport->port_name,
6794 pd->prli_svc_param_word_3[1],
6795 pd->prli_svc_param_word_3[0]);
6796
6797 if (NVME_TARGET(vha->hw, fcport)) {
6798 fcport->port_type = FCT_NVME;
6799 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6800 fcport->port_type |= FCT_NVME_INITIATOR;
6801 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6802 fcport->port_type |= FCT_NVME_TARGET;
6803 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6804 fcport->port_type |= FCT_NVME_DISCOVERY;
6805 } else {
6806 /* If not target must be initiator or unknown type. */
6807 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6808 fcport->port_type = FCT_INITIATOR;
6809 else
6810 fcport->port_type = FCT_TARGET;
6811 }
6812 /* Passback COS information. */
6813 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6814 FC_COS_CLASS2 : FC_COS_CLASS3;
6815
6816 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6817 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6818 fcport->conf_compl_supported = 1;
6819 }
6820
6821 gpd_error_out:
6822 return rval;
6823 }
6824
6825 /*
6826 * qla24xx_gidlist__wait
6827 * NOTE: don't call this routine from DPC thread.
6828 */
qla24xx_gidlist_wait(struct scsi_qla_host * vha,void * id_list,dma_addr_t id_list_dma,uint16_t * entries)6829 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6830 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6831 {
6832 int rval = QLA_FUNCTION_FAILED;
6833 mbx_cmd_t mc;
6834
6835 if (!vha->hw->flags.fw_started)
6836 goto done;
6837
6838 memset(&mc, 0, sizeof(mc));
6839 mc.mb[0] = MBC_GET_ID_LIST;
6840 mc.mb[2] = MSW(id_list_dma);
6841 mc.mb[3] = LSW(id_list_dma);
6842 mc.mb[6] = MSW(MSD(id_list_dma));
6843 mc.mb[7] = LSW(MSD(id_list_dma));
6844 mc.mb[8] = 0;
6845 mc.mb[9] = vha->vp_idx;
6846
6847 rval = qla24xx_send_mb_cmd(vha, &mc);
6848 if (rval != QLA_SUCCESS) {
6849 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6850 "%s: fail\n", __func__);
6851 } else {
6852 *entries = mc.mb[1];
6853 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6854 "%s: done\n", __func__);
6855 }
6856 done:
6857 return rval;
6858 }
6859
qla27xx_set_zio_threshold(scsi_qla_host_t * vha,uint16_t value)6860 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6861 {
6862 int rval;
6863 mbx_cmd_t mc;
6864 mbx_cmd_t *mcp = &mc;
6865
6866 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6867 "Entered %s\n", __func__);
6868
6869 memset(mcp->mb, 0 , sizeof(mcp->mb));
6870 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6871 mcp->mb[1] = 1;
6872 mcp->mb[2] = value;
6873 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6874 mcp->in_mb = MBX_2 | MBX_0;
6875 mcp->tov = MBX_TOV_SECONDS;
6876 mcp->flags = 0;
6877
6878 rval = qla2x00_mailbox_command(vha, mcp);
6879
6880 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6881 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6882
6883 return rval;
6884 }
6885
qla27xx_get_zio_threshold(scsi_qla_host_t * vha,uint16_t * value)6886 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6887 {
6888 int rval;
6889 mbx_cmd_t mc;
6890 mbx_cmd_t *mcp = &mc;
6891
6892 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6893 "Entered %s\n", __func__);
6894
6895 memset(mcp->mb, 0, sizeof(mcp->mb));
6896 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6897 mcp->mb[1] = 0;
6898 mcp->out_mb = MBX_1 | MBX_0;
6899 mcp->in_mb = MBX_2 | MBX_0;
6900 mcp->tov = MBX_TOV_SECONDS;
6901 mcp->flags = 0;
6902
6903 rval = qla2x00_mailbox_command(vha, mcp);
6904 if (rval == QLA_SUCCESS)
6905 *value = mc.mb[2];
6906
6907 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6908 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6909
6910 return rval;
6911 }
6912
/*
 * Read the SFP transceiver EEPROM block-by-block into ha->sfp_data and,
 * optionally, copy up to @count bytes into @buf.  Pages 0xa0 and 0xa2
 * are read back to back (device address switches after 4 blocks).
 */
int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			/* Copy at most the bytes remaining in buf. */
			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			/* NOTE(review): buf advances a full block even on a
			 * short copy; harmless only because c == count then
			 * stops further copies — confirm before reusing. */
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}
6964
qla24xx_res_count_wait(struct scsi_qla_host * vha,uint16_t * out_mb,int out_mb_sz)6965 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6966 uint16_t *out_mb, int out_mb_sz)
6967 {
6968 int rval = QLA_FUNCTION_FAILED;
6969 mbx_cmd_t mc;
6970
6971 if (!vha->hw->flags.fw_started)
6972 goto done;
6973
6974 memset(&mc, 0, sizeof(mc));
6975 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6976
6977 rval = qla24xx_send_mb_cmd(vha, &mc);
6978 if (rval != QLA_SUCCESS) {
6979 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6980 "%s: fail\n", __func__);
6981 } else {
6982 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6983 memcpy(out_mb, mc.mb, out_mb_sz);
6984 else
6985 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6986
6987 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6988 "%s: done\n", __func__);
6989 }
6990 done:
6991 return rval;
6992 }
6993
qla28xx_secure_flash_update(scsi_qla_host_t * vha,uint16_t opts,uint16_t region,uint32_t len,dma_addr_t sfub_dma_addr,uint32_t sfub_len)6994 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6995 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6996 uint32_t sfub_len)
6997 {
6998 int rval;
6999 mbx_cmd_t mc;
7000 mbx_cmd_t *mcp = &mc;
7001
7002 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
7003 mcp->mb[1] = opts;
7004 mcp->mb[2] = region;
7005 mcp->mb[3] = MSW(len);
7006 mcp->mb[4] = LSW(len);
7007 mcp->mb[5] = MSW(sfub_dma_addr);
7008 mcp->mb[6] = LSW(sfub_dma_addr);
7009 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
7010 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
7011 mcp->mb[9] = sfub_len;
7012 mcp->out_mb =
7013 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
7014 mcp->in_mb = MBX_2|MBX_1|MBX_0;
7015 mcp->tov = MBX_TOV_SECONDS;
7016 mcp->flags = 0;
7017 rval = qla2x00_mailbox_command(vha, mcp);
7018
7019 if (rval != QLA_SUCCESS) {
7020 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
7021 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
7022 mcp->mb[2]);
7023 }
7024
7025 return rval;
7026 }
7027
qla2xxx_write_remote_register(scsi_qla_host_t * vha,uint32_t addr,uint32_t data)7028 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
7029 uint32_t data)
7030 {
7031 int rval;
7032 mbx_cmd_t mc;
7033 mbx_cmd_t *mcp = &mc;
7034
7035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
7036 "Entered %s.\n", __func__);
7037
7038 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
7039 mcp->mb[1] = LSW(addr);
7040 mcp->mb[2] = MSW(addr);
7041 mcp->mb[3] = LSW(data);
7042 mcp->mb[4] = MSW(data);
7043 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
7044 mcp->in_mb = MBX_1|MBX_0;
7045 mcp->tov = MBX_TOV_SECONDS;
7046 mcp->flags = 0;
7047 rval = qla2x00_mailbox_command(vha, mcp);
7048
7049 if (rval != QLA_SUCCESS) {
7050 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
7051 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7052 } else {
7053 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
7054 "Done %s.\n", __func__);
7055 }
7056
7057 return rval;
7058 }
7059
qla2xxx_read_remote_register(scsi_qla_host_t * vha,uint32_t addr,uint32_t * data)7060 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
7061 uint32_t *data)
7062 {
7063 int rval;
7064 mbx_cmd_t mc;
7065 mbx_cmd_t *mcp = &mc;
7066
7067 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
7068 "Entered %s.\n", __func__);
7069
7070 mcp->mb[0] = MBC_READ_REMOTE_REG;
7071 mcp->mb[1] = LSW(addr);
7072 mcp->mb[2] = MSW(addr);
7073 mcp->out_mb = MBX_2|MBX_1|MBX_0;
7074 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
7075 mcp->tov = MBX_TOV_SECONDS;
7076 mcp->flags = 0;
7077 rval = qla2x00_mailbox_command(vha, mcp);
7078
7079 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
7080
7081 if (rval != QLA_SUCCESS) {
7082 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
7083 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7084 } else {
7085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
7086 "Done %s.\n", __func__);
7087 }
7088
7089 return rval;
7090 }
7091
/*
 * Get or set the FC LED configuration (2031/27xx/28xx only).
 * options BIT_0 selects set (1) vs get (0); BIT_1..BIT_3 select which
 * of the three LED values to program on a set.
 */
int
ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
	    __func__, options);

	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	if (options & BIT_0) {
		/* Set: note the led[]<->mb[] mapping is deliberately
		 * non-sequential (led[2]->mb[2], led[0]->mb[3], led[1]->mb[4]),
		 * mirroring the get path below. */
		if (options & BIT_1) {
			mcp->mb[2] = led[2];
			mcp->out_mb |= MBX_2;
		}
		if (options & BIT_2) {
			mcp->mb[3] = led[0];
			mcp->out_mb |= MBX_3;
		}
		if (options & BIT_3) {
			mcp->mb[4] = led[1];
			mcp->out_mb |= MBX_4;
		}
	} else {
		/* Get: expect all three LED values back. */
		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval) {
		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
		    __func__, rval, mcp->mb[0], mcp->mb[1]);
		return rval;
	}

	if (options & BIT_0) {
		/* A successful set cancels any beacon-blink in progress. */
		ha->beacon_blink_led = 0;
		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
	} else {
		led[2] = mcp->mb[2];
		led[0] = mcp->mb[3];
		led[1] = mcp->mb[4];
		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
		    __func__, led[0], led[1], led[2]);
	}

	return rval;
}
7148
7149 /**
7150 * qla_no_op_mb(): This MB is used to check if FW is still alive and
7151 * able to generate an interrupt. Otherwise, a timeout will trigger
7152 * FW dump + reset
7153 * @vha: host adapter pointer
7154 * Return: None
7155 */
qla_no_op_mb(struct scsi_qla_host * vha)7156 void qla_no_op_mb(struct scsi_qla_host *vha)
7157 {
7158 mbx_cmd_t mc;
7159 mbx_cmd_t *mcp = &mc;
7160 int rval;
7161
7162 memset(&mc, 0, sizeof(mc));
7163 mcp->mb[0] = 0; // noop cmd= 0
7164 mcp->out_mb = MBX_0;
7165 mcp->in_mb = MBX_0;
7166 mcp->tov = 5;
7167 mcp->flags = 0;
7168 rval = qla2x00_mailbox_command(vha, mcp);
7169
7170 if (rval) {
7171 ql_dbg(ql_dbg_async, vha, 0x7071,
7172 "Failed %s %x\n", __func__, rval);
7173 }
7174 }
7175
qla_mailbox_passthru(scsi_qla_host_t * vha,uint16_t * mbx_in,uint16_t * mbx_out)7176 int qla_mailbox_passthru(scsi_qla_host_t *vha,
7177 uint16_t *mbx_in, uint16_t *mbx_out)
7178 {
7179 mbx_cmd_t mc;
7180 mbx_cmd_t *mcp = &mc;
7181 int rval = -EINVAL;
7182
7183 memset(&mc, 0, sizeof(mc));
7184 /* Receiving all 32 register's contents */
7185 memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
7186
7187 mcp->out_mb = 0xFFFFFFFF;
7188 mcp->in_mb = 0xFFFFFFFF;
7189
7190 mcp->tov = MBX_TOV_SECONDS;
7191 mcp->flags = 0;
7192 mcp->bufp = NULL;
7193
7194 rval = qla2x00_mailbox_command(vha, mcp);
7195
7196 if (rval != QLA_SUCCESS) {
7197 ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
7198 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7199 } else {
7200 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
7201 __func__);
7202 /* passing all 32 register's contents */
7203 memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));
7204 }
7205
7206 return rval;
7207 }
7208
qla_mpipt_validate_fw(scsi_qla_host_t * vha,u16 img_idx,uint16_t * state)7209 int qla_mpipt_validate_fw(scsi_qla_host_t *vha, u16 img_idx, uint16_t *state)
7210 {
7211 struct qla_hw_data *ha = vha->hw;
7212 mbx_cmd_t mc;
7213 mbx_cmd_t *mcp = &mc;
7214 int rval;
7215
7216 if (!IS_QLA28XX(ha)) {
7217 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s %d\n", __func__, __LINE__);
7218 return QLA_FUNCTION_FAILED;
7219 }
7220
7221 if (img_idx > 1) {
7222 ql_log(ql_log_info, vha, 0xffff,
7223 "%s %d Invalid flash image index [%d]\n",
7224 __func__, __LINE__, img_idx);
7225 return QLA_INVALID_COMMAND;
7226 }
7227
7228 memset(&mc, 0, sizeof(mc));
7229 mcp->mb[0] = MBC_MPI_PASSTHROUGH;
7230 mcp->mb[1] = MPIPT_SUBCMD_VALIDATE_FW;
7231 mcp->mb[2] = img_idx;
7232 mcp->out_mb = MBX_1|MBX_0;
7233 mcp->in_mb = MBX_2|MBX_1|MBX_0;
7234
7235 /* send mb via iocb */
7236 rval = qla24xx_send_mb_cmd(vha, &mc);
7237 if (rval) {
7238 ql_log(ql_log_info, vha, 0xffff, "%s:Failed %x (mb=%x,%x)\n",
7239 __func__, rval, mcp->mb[0], mcp->mb[1]);
7240 *state = mcp->mb[1];
7241 } else {
7242 ql_log(ql_log_info, vha, 0xffff, "%s: mb=%x,%x,%x\n", __func__,
7243 mcp->mb[0], mcp->mb[1], mcp->mb[2]);
7244 }
7245
7246 return rval;
7247 }
7248