1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 
8 /*
9  * Table showing the current message id in use for each level.
10  * Update this table when adding log/debug messages.
11  * ----------------------------------------------------------------------
12  * |             Level            |   Last Value Used  |     Holes	|
13  * ----------------------------------------------------------------------
14  * | Module Init and Probe        |       0x0116       | 0xfa           |
15  * | Mailbox commands             |       0x112b       |		|
16  * | Device Discovery             |       0x2084       |		|
17  * | Queue Command and IO tracing |       0x302f       | 0x3008,0x302d, |
18  * |                              |                    | 0x302e         |
19  * | DPC Thread                   |       0x401c       |		|
20  * | Async Events                 |       0x5057       | 0x5052		|
21  * | Timer Routines               |       0x6011       | 0x600e,0x600f  |
22  * | User Space Interactions      |       0x709e       | 0x7018,0x702e  |
23  * |                              |                    | 0x7039,0x7045  |
24  * | Task Management              |       0x803c       | 0x8025-0x8026  |
25  * |                              |                    | 0x800b,0x8039  |
26  * | AER/EEH                      |       0x900f       |		|
27  * | Virtual Port                 |       0xa007       |		|
28  * | ISP82XX Specific             |       0xb052       |    		|
29  * | MultiQ                       |       0xc00b       |		|
30  * | Misc                         |       0xd00b       |		|
31  * ----------------------------------------------------------------------
32  */
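/*
 * For example, a new message added to the "Misc" range above would take the
 * next unused id (0xd00c at the time of writing), along the lines of the
 * following (hypothetical) call:
 *
 *	ql_log(ql_log_info, vha, 0xd00c, "New misc debug message.\n");
 *
 * The "Last Value Used" column should then be bumped to match.
 */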
33 
34 #include "qla_def.h"
35 
36 #include <linux/delay.h>
37 
38 static uint32_t ql_dbg_offset = 0x800;
39 
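/*
 * qla2xxx_prep_dump() - Fill the common dump header with the running
 * firmware version and the adapter's PCI IDs, stored big-endian.
 */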
40 static inline void
41 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
42 {
43 	fw_dump->fw_major_version = htonl(ha->fw_major_version);
44 	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
45 	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
46 	fw_dump->fw_attributes = htonl(ha->fw_attributes);
47 
48 	fw_dump->vendor = htonl(ha->pdev->vendor);
49 	fw_dump->device = htonl(ha->pdev->device);
50 	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
51 	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
52 }
53 
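/*
 * qla2xxx_copy_queues() - Copy the base request and response rings into the
 * dump buffer and return a pointer just past the copied data.
 */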
54 static inline void *
55 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
56 {
57 	struct req_que *req = ha->req_q_map[0];
58 	struct rsp_que *rsp = ha->rsp_q_map[0];
59 	/* Request queue. */
60 	memcpy(ptr, req->ring, req->length *
61 	    sizeof(request_t));
62 
63 	/* Response queue. */
64 	ptr += req->length * sizeof(request_t);
65 	memcpy(ptr, rsp->ring, rsp->length  *
66 	    sizeof(response_t));
67 
68 	return ptr + (rsp->length * sizeof(response_t));
69 }
70 
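/*
 * qla24xx_dump_ram() - Read @ram_dwords dwords of RISC RAM starting at @addr
 * using the MBC_DUMP_RISC_RAM_EXTENDED mailbox command.  The data is DMAed
 * into the GID list buffer in chunks, byte-swapped into @ram, and on success
 * *@nxt points just past the copied data.
 */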
71 static int
72 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
73     uint32_t ram_dwords, void **nxt)
74 {
75 	int rval;
76 	uint32_t cnt, stat, timer, dwords, idx;
77 	uint16_t mb0;
78 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
79 	dma_addr_t dump_dma = ha->gid_list_dma;
80 	uint32_t *dump = (uint32_t *)ha->gid_list;
81 
82 	rval = QLA_SUCCESS;
83 	mb0 = 0;
84 
85 	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
86 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
87 
88 	dwords = GID_LIST_SIZE / 4;
89 	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
90 	    cnt += dwords, addr += dwords) {
91 		if (cnt + dwords > ram_dwords)
92 			dwords = ram_dwords - cnt;
93 
94 		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
95 		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
96 
97 		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
98 		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
99 		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
100 		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
101 
102 		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
103 		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
104 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
105 
106 		for (timer = 6000000; timer; timer--) {
107 			/* Check for pending interrupts. */
108 			stat = RD_REG_DWORD(&reg->host_status);
109 			if (stat & HSRX_RISC_INT) {
110 				stat &= 0xff;
111 
112 				if (stat == 0x1 || stat == 0x2 ||
113 				    stat == 0x10 || stat == 0x11) {
114 					set_bit(MBX_INTERRUPT,
115 					    &ha->mbx_cmd_flags);
116 
117 					mb0 = RD_REG_WORD(&reg->mailbox0);
118 
119 					WRT_REG_DWORD(&reg->hccr,
120 					    HCCRX_CLR_RISC_INT);
121 					RD_REG_DWORD(&reg->hccr);
122 					break;
123 				}
124 
125 				/* Clear this intr; it wasn't a mailbox intr */
126 				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
127 				RD_REG_DWORD(&reg->hccr);
128 			}
129 			udelay(5);
130 		}
131 
132 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
133 			rval = mb0 & MBS_MASK;
134 			for (idx = 0; idx < dwords; idx++)
135 				ram[cnt + idx] = swab32(dump[idx]);
136 		} else {
137 			rval = QLA_FUNCTION_FAILED;
138 		}
139 	}
140 
141 	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
142 	return rval;
143 }
144 
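/*
 * qla24xx_dump_memory() - Dump code RAM (at 0x20000) followed by external
 * memory (at 0x100000) for ISP24xx and later parts.
 */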
145 static int
146 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
147     uint32_t cram_size, void **nxt)
148 {
149 	int rval;
150 
151 	/* Code RAM. */
152 	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
153 	if (rval != QLA_SUCCESS)
154 		return rval;
155 
156 	/* External Memory. */
157 	return qla24xx_dump_ram(ha, 0x100000, *nxt,
158 	    ha->fw_memory_size - 0x100000 + 1, nxt);
159 }
160 
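/*
 * qla24xx_read_window() - Select the register window at @iobase and read
 * @count dwords into @buf (big-endian); returns the advanced buffer pointer.
 */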
161 static uint32_t *
162 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
163     uint32_t count, uint32_t *buf)
164 {
165 	uint32_t __iomem *dmp_reg;
166 
167 	WRT_REG_DWORD(&reg->iobase_addr, iobase);
168 	dmp_reg = &reg->iobase_window;
169 	while (count--)
170 		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));
171 
172 	return buf;
173 }
174 
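/*
 * qla24xx_pause_risc() - Request a RISC pause and wait (up to roughly three
 * seconds) for the paused status to be reported.
 */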
175 static inline int
176 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
177 {
178 	int rval = QLA_SUCCESS;
179 	uint32_t cnt;
180 
181 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
182 	for (cnt = 30000;
183 	    ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
184 	    rval == QLA_SUCCESS; cnt--) {
185 		if (cnt)
186 			udelay(100);
187 		else
188 			rval = QLA_FUNCTION_TIMEOUT;
189 	}
190 
191 	return rval;
192 }
193 
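/*
 * qla24xx_soft_reset() - Quiesce DMA, issue an ISP soft reset, wait for the
 * reset and any pending firmware NVRAM accesses to complete, then release
 * the RISC and wait for mailbox 0 to become ready.
 */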
194 static int
195 qla24xx_soft_reset(struct qla_hw_data *ha)
196 {
197 	int rval = QLA_SUCCESS;
198 	uint32_t cnt;
199 	uint16_t mb0, wd;
200 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
201 
202 	/* Reset RISC. */
203 	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
204 	for (cnt = 0; cnt < 30000; cnt++) {
205 		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
206 			break;
207 
208 		udelay(10);
209 	}
210 
211 	WRT_REG_DWORD(&reg->ctrl_status,
212 	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
213 	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
214 
215 	udelay(100);
216 	/* Wait for firmware to complete NVRAM accesses. */
217 	mb0 = RD_REG_WORD(&reg->mailbox0);
218 	for (cnt = 10000 ; cnt && mb0; cnt--) {
219 		udelay(5);
220 		mb0 = RD_REG_WORD(&reg->mailbox0);
221 		barrier();
222 	}
223 
224 	/* Wait for soft-reset to complete. */
225 	for (cnt = 0; cnt < 30000; cnt++) {
226 		if ((RD_REG_DWORD(&reg->ctrl_status) &
227 		    CSRX_ISP_SOFT_RESET) == 0)
228 			break;
229 
230 		udelay(10);
231 	}
232 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
233 	RD_REG_DWORD(&reg->hccr);             /* PCI Posting. */
234 
235 	for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
236 	    rval == QLA_SUCCESS; cnt--) {
237 		if (cnt)
238 			udelay(100);
239 		else
240 			rval = QLA_FUNCTION_TIMEOUT;
241 	}
242 
243 	return rval;
244 }
245 
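/*
 * qla2xxx_dump_ram() - 16-bit variant of the RISC RAM dump used by the
 * ISP2100/2200/2300 code: DMA @ram_words words into the GID list buffer via
 * MBC_DUMP_RISC_RAM_EXTENDED and byte-swap them into @ram.
 */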
246 static int
247 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
248     uint32_t ram_words, void **nxt)
249 {
250 	int rval;
251 	uint32_t cnt, stat, timer, words, idx;
252 	uint16_t mb0;
253 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
254 	dma_addr_t dump_dma = ha->gid_list_dma;
255 	uint16_t *dump = (uint16_t *)ha->gid_list;
256 
257 	rval = QLA_SUCCESS;
258 	mb0 = 0;
259 
260 	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
261 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
262 
263 	words = GID_LIST_SIZE / 2;
264 	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
265 	    cnt += words, addr += words) {
266 		if (cnt + words > ram_words)
267 			words = ram_words - cnt;
268 
269 		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
270 		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
271 
272 		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
273 		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
274 		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
275 		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
276 
277 		WRT_MAILBOX_REG(ha, reg, 4, words);
278 		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
279 
280 		for (timer = 6000000; timer; timer--) {
281 			/* Check for pending interrupts. */
282 			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
283 			if (stat & HSR_RISC_INT) {
284 				stat &= 0xff;
285 
286 				if (stat == 0x1 || stat == 0x2) {
287 					set_bit(MBX_INTERRUPT,
288 					    &ha->mbx_cmd_flags);
289 
290 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
291 
292 					/* Release mailbox registers. */
293 					WRT_REG_WORD(&reg->semaphore, 0);
294 					WRT_REG_WORD(&reg->hccr,
295 					    HCCR_CLR_RISC_INT);
296 					RD_REG_WORD(&reg->hccr);
297 					break;
298 				} else if (stat == 0x10 || stat == 0x11) {
299 					set_bit(MBX_INTERRUPT,
300 					    &ha->mbx_cmd_flags);
301 
302 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
303 
304 					WRT_REG_WORD(&reg->hccr,
305 					    HCCR_CLR_RISC_INT);
306 					RD_REG_WORD(&reg->hccr);
307 					break;
308 				}
309 
310 				/* clear this intr; it wasn't a mailbox intr */
311 				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
312 				RD_REG_WORD(&reg->hccr);
313 			}
314 			udelay(5);
315 		}
316 
317 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
318 			rval = mb0 & MBS_MASK;
319 			for (idx = 0; idx < words; idx++)
320 				ram[cnt + idx] = swab16(dump[idx]);
321 		} else {
322 			rval = QLA_FUNCTION_FAILED;
323 		}
324 	}
325 
326 	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
327 	return rval;
328 }
329 
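/*
 * qla2xxx_read_window() - Read @count words from the currently selected
 * register window into @buf (big-endian).
 */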
330 static inline void
331 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
332     uint16_t *buf)
333 {
334 	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
335 
336 	while (count--)
337 		*buf++ = htons(RD_REG_WORD(dmp_reg++));
338 }
339 
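/*
 * qla24xx_copy_eft() - Append the Extended Firmware Trace buffer, if one has
 * been allocated, and return a pointer just past it.
 */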
340 static inline void *
341 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
342 {
343 	if (!ha->eft)
344 		return ptr;
345 
346 	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
347 	return ptr + ntohl(ha->fw_dump->eft_size);
348 }
349 
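/*
 * qla25xx_copy_fce() - Append an FCE chain entry (header, FCE mailbox
 * snapshot and trace buffer) when FCE tracing is enabled; *@last_chain is
 * updated so the final chain entry can be flagged as DUMP_CHAIN_LAST.
 */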
350 static inline void *
351 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
352 {
353 	uint32_t cnt;
354 	uint32_t *iter_reg;
355 	struct qla2xxx_fce_chain *fcec = ptr;
356 
357 	if (!ha->fce)
358 		return ptr;
359 
360 	*last_chain = &fcec->type;
361 	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
362 	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
363 	    fce_calc_size(ha->fce_bufs));
364 	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
365 	fcec->addr_l = htonl(LSD(ha->fce_dma));
366 	fcec->addr_h = htonl(MSD(ha->fce_dma));
367 
368 	iter_reg = fcec->eregs;
369 	for (cnt = 0; cnt < 8; cnt++)
370 		*iter_reg++ = htonl(ha->fce_mb[cnt]);
371 
372 	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
373 
374 	return (char *)iter_reg + ntohl(fcec->size);
375 }
376 
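/*
 * qla25xx_copy_mq() - Append a multi-queue chain entry capturing the in/out
 * pointers of every request and response queue when MQ is enabled.
 */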
377 static inline void *
378 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
379 {
380 	uint32_t cnt, que_idx;
381 	uint8_t que_cnt;
382 	struct qla2xxx_mq_chain *mq = ptr;
383 	struct device_reg_25xxmq __iomem *reg;
384 
385 	if (!ha->mqenable)
386 		return ptr;
387 
388 	mq = ptr;
389 	*last_chain = &mq->type;
390 	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
391 	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
392 
393 	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
394 		ha->max_req_queues : ha->max_rsp_queues;
395 	mq->count = htonl(que_cnt);
396 	for (cnt = 0; cnt < que_cnt; cnt++) {
397 		reg = (struct device_reg_25xxmq *) ((void *)
398 			ha->mqiobase + cnt * QLA_QUE_PAGE);
399 		que_idx = cnt * 4;
400 		mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
401 		mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
402 		mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
403 		mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
404 	}
405 
406 	return ptr + sizeof(struct qla2xxx_mq_chain);
407 }
408 
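/**
 * qla2xxx_dump_post_process() - Log the outcome of a firmware dump attempt.
 * @vha: HA context
 * @rval: result of the dump attempt
 *
 * On success the dump is marked as valid and a firmware-dump uevent is
 * queued; on failure the dump buffer is left marked as unused.
 */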
409 void
410 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
411 {
412 	struct qla_hw_data *ha = vha->hw;
413 
414 	if (rval != QLA_SUCCESS) {
415 		ql_log(ql_log_warn, vha, 0xd000,
416 		    "Failed to dump firmware (%x).\n", rval);
417 		ha->fw_dumped = 0;
418 	} else {
419 		ql_log(ql_log_info, vha, 0xd001,
420 		    "Firmware dump saved to temp buffer (%ld/%p).\n",
421 		    vha->host_no, ha->fw_dump);
422 		ha->fw_dumped = 1;
423 		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
424 	}
425 }
426 
427 /**
428  * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
429  * @vha: HA context
430  * @hardware_locked: non-zero if the caller already holds the hardware_lock
431  */
432 void
433 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
434 {
435 	int		rval;
436 	uint32_t	cnt;
437 	struct qla_hw_data *ha = vha->hw;
438 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
439 	uint16_t __iomem *dmp_reg;
440 	unsigned long	flags;
441 	struct qla2300_fw_dump	*fw;
442 	void		*nxt;
443 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
444 
445 	flags = 0;
446 
447 	if (!hardware_locked)
448 		spin_lock_irqsave(&ha->hardware_lock, flags);
449 
450 	if (!ha->fw_dump) {
451 		ql_log(ql_log_warn, vha, 0xd002,
452 		    "No buffer available for dump.\n");
453 		goto qla2300_fw_dump_failed;
454 	}
455 
456 	if (ha->fw_dumped) {
457 		ql_log(ql_log_warn, vha, 0xd003,
458 		    "Firmware has been previously dumped (%p) "
459 		    "-- ignoring request.\n",
460 		    ha->fw_dump);
461 		goto qla2300_fw_dump_failed;
462 	}
463 	fw = &ha->fw_dump->isp.isp23;
464 	qla2xxx_prep_dump(ha, ha->fw_dump);
465 
466 	rval = QLA_SUCCESS;
467 	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
468 
469 	/* Pause RISC. */
470 	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
471 	if (IS_QLA2300(ha)) {
472 		for (cnt = 30000;
473 		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
474 			rval == QLA_SUCCESS; cnt--) {
475 			if (cnt)
476 				udelay(100);
477 			else
478 				rval = QLA_FUNCTION_TIMEOUT;
479 		}
480 	} else {
481 		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
482 		udelay(10);
483 	}
484 
485 	if (rval == QLA_SUCCESS) {
486 		dmp_reg = &reg->flash_address;
487 		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
488 			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
489 
490 		dmp_reg = &reg->u.isp2300.req_q_in;
491 		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
492 			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
493 
494 		dmp_reg = &reg->u.isp2300.mailbox0;
495 		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
496 			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
497 
498 		WRT_REG_WORD(&reg->ctrl_status, 0x40);
499 		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
500 
501 		WRT_REG_WORD(&reg->ctrl_status, 0x50);
502 		qla2xxx_read_window(reg, 48, fw->dma_reg);
503 
504 		WRT_REG_WORD(&reg->ctrl_status, 0x00);
505 		dmp_reg = &reg->risc_hw;
506 		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
507 			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
508 
509 		WRT_REG_WORD(&reg->pcr, 0x2000);
510 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
511 
512 		WRT_REG_WORD(&reg->pcr, 0x2200);
513 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
514 
515 		WRT_REG_WORD(&reg->pcr, 0x2400);
516 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
517 
518 		WRT_REG_WORD(&reg->pcr, 0x2600);
519 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
520 
521 		WRT_REG_WORD(&reg->pcr, 0x2800);
522 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
523 
524 		WRT_REG_WORD(&reg->pcr, 0x2A00);
525 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
526 
527 		WRT_REG_WORD(&reg->pcr, 0x2C00);
528 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
529 
530 		WRT_REG_WORD(&reg->pcr, 0x2E00);
531 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
532 
533 		WRT_REG_WORD(&reg->ctrl_status, 0x10);
534 		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
535 
536 		WRT_REG_WORD(&reg->ctrl_status, 0x20);
537 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
538 
539 		WRT_REG_WORD(&reg->ctrl_status, 0x30);
540 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
541 
542 		/* Reset RISC. */
543 		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
544 		for (cnt = 0; cnt < 30000; cnt++) {
545 			if ((RD_REG_WORD(&reg->ctrl_status) &
546 			    CSR_ISP_SOFT_RESET) == 0)
547 				break;
548 
549 			udelay(10);
550 		}
551 	}
552 
553 	if (!IS_QLA2300(ha)) {
554 		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
555 		    rval == QLA_SUCCESS; cnt--) {
556 			if (cnt)
557 				udelay(100);
558 			else
559 				rval = QLA_FUNCTION_TIMEOUT;
560 		}
561 	}
562 
563 	/* Get RISC SRAM. */
564 	if (rval == QLA_SUCCESS)
565 		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
566 		    sizeof(fw->risc_ram) / 2, &nxt);
567 
568 	/* Get stack SRAM. */
569 	if (rval == QLA_SUCCESS)
570 		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
571 		    sizeof(fw->stack_ram) / 2, &nxt);
572 
573 	/* Get data SRAM. */
574 	if (rval == QLA_SUCCESS)
575 		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
576 		    ha->fw_memory_size - 0x11000 + 1, &nxt);
577 
578 	if (rval == QLA_SUCCESS)
579 		qla2xxx_copy_queues(ha, nxt);
580 
581 	qla2xxx_dump_post_process(base_vha, rval);
582 
583 qla2300_fw_dump_failed:
584 	if (!hardware_locked)
585 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
586 }
587 
588 /**
589  * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
590  * @vha: HA context
591  * @hardware_locked: non-zero if the caller already holds the hardware_lock
592  */
593 void
594 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
595 {
596 	int		rval;
597 	uint32_t	cnt, timer;
598 	uint16_t	risc_address;
599 	uint16_t	mb0, mb2;
600 	struct qla_hw_data *ha = vha->hw;
601 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
602 	uint16_t __iomem *dmp_reg;
603 	unsigned long	flags;
604 	struct qla2100_fw_dump	*fw;
605 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
606 
607 	risc_address = 0;
608 	mb0 = mb2 = 0;
609 	flags = 0;
610 
611 	if (!hardware_locked)
612 		spin_lock_irqsave(&ha->hardware_lock, flags);
613 
614 	if (!ha->fw_dump) {
615 		ql_log(ql_log_warn, vha, 0xd004,
616 		    "No buffer available for dump.\n");
617 		goto qla2100_fw_dump_failed;
618 	}
619 
620 	if (ha->fw_dumped) {
621 		ql_log(ql_log_warn, vha, 0xd005,
622 		    "Firmware has been previously dumped (%p) "
623 		    "-- ignoring request.\n",
624 		    ha->fw_dump);
625 		goto qla2100_fw_dump_failed;
626 	}
627 	fw = &ha->fw_dump->isp.isp21;
628 	qla2xxx_prep_dump(ha, ha->fw_dump);
629 
630 	rval = QLA_SUCCESS;
631 	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
632 
633 	/* Pause RISC. */
634 	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
635 	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
636 	    rval == QLA_SUCCESS; cnt--) {
637 		if (cnt)
638 			udelay(100);
639 		else
640 			rval = QLA_FUNCTION_TIMEOUT;
641 	}
642 	if (rval == QLA_SUCCESS) {
643 		dmp_reg = &reg->flash_address;
644 		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
645 			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
646 
647 		dmp_reg = &reg->u.isp2100.mailbox0;
648 		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
649 			if (cnt == 8)
650 				dmp_reg = &reg->u_end.isp2200.mailbox8;
651 
652 			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
653 		}
654 
655 		dmp_reg = &reg->u.isp2100.unused_2[0];
656 		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
657 			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
658 
659 		WRT_REG_WORD(&reg->ctrl_status, 0x00);
660 		dmp_reg = &reg->risc_hw;
661 		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
662 			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
663 
664 		WRT_REG_WORD(&reg->pcr, 0x2000);
665 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
666 
667 		WRT_REG_WORD(&reg->pcr, 0x2100);
668 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
669 
670 		WRT_REG_WORD(&reg->pcr, 0x2200);
671 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
672 
673 		WRT_REG_WORD(&reg->pcr, 0x2300);
674 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
675 
676 		WRT_REG_WORD(&reg->pcr, 0x2400);
677 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
678 
679 		WRT_REG_WORD(&reg->pcr, 0x2500);
680 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
681 
682 		WRT_REG_WORD(&reg->pcr, 0x2600);
683 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
684 
685 		WRT_REG_WORD(&reg->pcr, 0x2700);
686 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
687 
688 		WRT_REG_WORD(&reg->ctrl_status, 0x10);
689 		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
690 
691 		WRT_REG_WORD(&reg->ctrl_status, 0x20);
692 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
693 
694 		WRT_REG_WORD(&reg->ctrl_status, 0x30);
695 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
696 
697 		/* Reset the ISP. */
698 		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
699 	}
700 
701 	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
702 	    rval == QLA_SUCCESS; cnt--) {
703 		if (cnt)
704 			udelay(100);
705 		else
706 			rval = QLA_FUNCTION_TIMEOUT;
707 	}
708 
709 	/* Pause RISC. */
710 	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
711 	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
712 
713 		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
714 		for (cnt = 30000;
715 		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
716 		    rval == QLA_SUCCESS; cnt--) {
717 			if (cnt)
718 				udelay(100);
719 			else
720 				rval = QLA_FUNCTION_TIMEOUT;
721 		}
722 		if (rval == QLA_SUCCESS) {
723 			/* Set memory configuration and timing. */
724 			if (IS_QLA2100(ha))
725 				WRT_REG_WORD(&reg->mctr, 0xf1);
726 			else
727 				WRT_REG_WORD(&reg->mctr, 0xf2);
728 			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */
729 
730 			/* Release RISC. */
731 			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
732 		}
733 	}
734 
735 	if (rval == QLA_SUCCESS) {
736 		/* Get RISC SRAM. */
737 		risc_address = 0x1000;
738  		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
739 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
740 	}
741 	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
742 	    cnt++, risc_address++) {
743  		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
744 		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
745 
746 		for (timer = 6000000; timer != 0; timer--) {
747 			/* Check for pending interrupts. */
748 			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
749 				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
750 					set_bit(MBX_INTERRUPT,
751 					    &ha->mbx_cmd_flags);
752 
753 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
754 					mb2 = RD_MAILBOX_REG(ha, reg, 2);
755 
756 					WRT_REG_WORD(&reg->semaphore, 0);
757 					WRT_REG_WORD(&reg->hccr,
758 					    HCCR_CLR_RISC_INT);
759 					RD_REG_WORD(&reg->hccr);
760 					break;
761 				}
762 				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
763 				RD_REG_WORD(&reg->hccr);
764 			}
765 			udelay(5);
766 		}
767 
768 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
769 			rval = mb0 & MBS_MASK;
770 			fw->risc_ram[cnt] = htons(mb2);
771 		} else {
772 			rval = QLA_FUNCTION_FAILED;
773 		}
774 	}
775 
776 	if (rval == QLA_SUCCESS)
777 		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
778 
779 	qla2xxx_dump_post_process(base_vha, rval);
780 
781 qla2100_fw_dump_failed:
782 	if (!hardware_locked)
783 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
784 }
785 
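/**
 * qla24xx_fw_dump() - Dumps binary data from the ISP24xx firmware.
 * @vha: HA context
 * @hardware_locked: non-zero if the caller already holds the hardware_lock
 */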
786 void
787 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
788 {
789 	int		rval;
790 	uint32_t	cnt;
791 	uint32_t	risc_address;
792 	struct qla_hw_data *ha = vha->hw;
793 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
794 	uint32_t __iomem *dmp_reg;
795 	uint32_t	*iter_reg;
796 	uint16_t __iomem *mbx_reg;
797 	unsigned long	flags;
798 	struct qla24xx_fw_dump *fw;
799 	uint32_t	ext_mem_cnt;
800 	void		*nxt;
801 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
802 
803 	if (IS_QLA82XX(ha))
804 		return;
805 
806 	risc_address = ext_mem_cnt = 0;
807 	flags = 0;
808 
809 	if (!hardware_locked)
810 		spin_lock_irqsave(&ha->hardware_lock, flags);
811 
812 	if (!ha->fw_dump) {
813 		ql_log(ql_log_warn, vha, 0xd006,
814 		    "No buffer available for dump.\n");
815 		goto qla24xx_fw_dump_failed;
816 	}
817 
818 	if (ha->fw_dumped) {
819 		ql_log(ql_log_warn, vha, 0xd007,
820 		    "Firmware has been previously dumped (%p) "
821 		    "-- ignoring request.\n",
822 		    ha->fw_dump);
823 		goto qla24xx_fw_dump_failed;
824 	}
825 	fw = &ha->fw_dump->isp.isp24;
826 	qla2xxx_prep_dump(ha, ha->fw_dump);
827 
828 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
829 
830 	/* Pause RISC. */
831 	rval = qla24xx_pause_risc(reg);
832 	if (rval != QLA_SUCCESS)
833 		goto qla24xx_fw_dump_failed_0;
834 
835 	/* Host interface registers. */
836 	dmp_reg = &reg->flash_addr;
837 	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
838 		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
839 
840 	/* Disable interrupts. */
841 	WRT_REG_DWORD(&reg->ictrl, 0);
842 	RD_REG_DWORD(&reg->ictrl);
843 
844 	/* Shadow registers. */
845 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
846 	RD_REG_DWORD(&reg->iobase_addr);
847 	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
848 	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
849 
850 	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
851 	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
852 
853 	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
854 	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
855 
856 	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
857 	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
858 
859 	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
860 	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
861 
862 	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
863 	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
864 
865 	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
866 	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
867 
868 	/* Mailbox registers. */
869 	mbx_reg = &reg->mailbox0;
870 	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
871 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
872 
873 	/* Transfer sequence registers. */
874 	iter_reg = fw->xseq_gp_reg;
875 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
876 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
877 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
878 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
879 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
880 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
881 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
882 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
883 
884 	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
885 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
886 
887 	/* Receive sequence registers. */
888 	iter_reg = fw->rseq_gp_reg;
889 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
890 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
891 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
892 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
893 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
894 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
895 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
896 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
897 
898 	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
899 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
900 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
901 
902 	/* Command DMA registers. */
903 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
904 
905 	/* Queues. */
906 	iter_reg = fw->req0_dma_reg;
907 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
908 	dmp_reg = &reg->iobase_q;
909 	for (cnt = 0; cnt < 7; cnt++)
910 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
911 
912 	iter_reg = fw->resp0_dma_reg;
913 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
914 	dmp_reg = &reg->iobase_q;
915 	for (cnt = 0; cnt < 7; cnt++)
916 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
917 
918 	iter_reg = fw->req1_dma_reg;
919 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
920 	dmp_reg = &reg->iobase_q;
921 	for (cnt = 0; cnt < 7; cnt++)
922 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
923 
924 	/* Transmit DMA registers. */
925 	iter_reg = fw->xmt0_dma_reg;
926 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
927 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
928 
929 	iter_reg = fw->xmt1_dma_reg;
930 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
931 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
932 
933 	iter_reg = fw->xmt2_dma_reg;
934 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
935 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
936 
937 	iter_reg = fw->xmt3_dma_reg;
938 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
939 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
940 
941 	iter_reg = fw->xmt4_dma_reg;
942 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
943 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
944 
945 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
946 
947 	/* Receive DMA registers. */
948 	iter_reg = fw->rcvt0_data_dma_reg;
949 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
950 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
951 
952 	iter_reg = fw->rcvt1_data_dma_reg;
953 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
954 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
955 
956 	/* RISC registers. */
957 	iter_reg = fw->risc_gp_reg;
958 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
959 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
960 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
961 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
962 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
963 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
964 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
965 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
966 
967 	/* Local memory controller registers. */
968 	iter_reg = fw->lmc_reg;
969 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
970 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
971 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
972 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
973 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
974 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
975 	qla24xx_read_window(reg, 0x3060, 16, iter_reg);
976 
977 	/* Fibre Protocol Module registers. */
978 	iter_reg = fw->fpm_hdw_reg;
979 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
980 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
981 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
982 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
983 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
984 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
985 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
986 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
987 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
988 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
989 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
990 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
991 
992 	/* Frame Buffer registers. */
993 	iter_reg = fw->fb_hdw_reg;
994 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
995 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
996 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
997 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
998 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
999 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1000 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1001 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1002 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1003 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1004 	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1005 
1006 	rval = qla24xx_soft_reset(ha);
1007 	if (rval != QLA_SUCCESS)
1008 		goto qla24xx_fw_dump_failed_0;
1009 
1010 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1011 	    &nxt);
1012 	if (rval != QLA_SUCCESS)
1013 		goto qla24xx_fw_dump_failed_0;
1014 
1015 	nxt = qla2xxx_copy_queues(ha, nxt);
1016 
1017 	qla24xx_copy_eft(ha, nxt);
1018 
1019 qla24xx_fw_dump_failed_0:
1020 	qla2xxx_dump_post_process(base_vha, rval);
1021 
1022 qla24xx_fw_dump_failed:
1023 	if (!hardware_locked)
1024 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1025 }
1026 
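/**
 * qla25xx_fw_dump() - Dumps binary data from the ISP25xx firmware.
 * @vha: HA context
 * @hardware_locked: non-zero if the caller already holds the hardware_lock
 */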
1027 void
1028 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1029 {
1030 	int		rval;
1031 	uint32_t	cnt;
1032 	uint32_t	risc_address;
1033 	struct qla_hw_data *ha = vha->hw;
1034 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1035 	uint32_t __iomem *dmp_reg;
1036 	uint32_t	*iter_reg;
1037 	uint16_t __iomem *mbx_reg;
1038 	unsigned long	flags;
1039 	struct qla25xx_fw_dump *fw;
1040 	uint32_t	ext_mem_cnt;
1041 	void		*nxt, *nxt_chain;
1042 	uint32_t	*last_chain = NULL;
1043 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1044 
1045 	risc_address = ext_mem_cnt = 0;
1046 	flags = 0;
1047 
1048 	if (!hardware_locked)
1049 		spin_lock_irqsave(&ha->hardware_lock, flags);
1050 
1051 	if (!ha->fw_dump) {
1052 		ql_log(ql_log_warn, vha, 0xd008,
1053 		    "No buffer available for dump.\n");
1054 		goto qla25xx_fw_dump_failed;
1055 	}
1056 
1057 	if (ha->fw_dumped) {
1058 		ql_log(ql_log_warn, vha, 0xd009,
1059 		    "Firmware has been previously dumped (%p) "
1060 		    "-- ignoring request.\n",
1061 		    ha->fw_dump);
1062 		goto qla25xx_fw_dump_failed;
1063 	}
1064 	fw = &ha->fw_dump->isp.isp25;
1065 	qla2xxx_prep_dump(ha, ha->fw_dump);
1066 	ha->fw_dump->version = __constant_htonl(2);
1067 
1068 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1069 
1070 	/* Pause RISC. */
1071 	rval = qla24xx_pause_risc(reg);
1072 	if (rval != QLA_SUCCESS)
1073 		goto qla25xx_fw_dump_failed_0;
1074 
1075 	/* Host/Risc registers. */
1076 	iter_reg = fw->host_risc_reg;
1077 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1078 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1079 
1080 	/* PCIe registers. */
1081 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1082 	RD_REG_DWORD(&reg->iobase_addr);
1083 	WRT_REG_DWORD(&reg->iobase_window, 0x01);
1084 	dmp_reg = &reg->iobase_c4;
1085 	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
1086 	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
1087 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1088 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1089 
1090 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
1091 	RD_REG_DWORD(&reg->iobase_window);
1092 
1093 	/* Host interface registers. */
1094 	dmp_reg = &reg->flash_addr;
1095 	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1096 		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1097 
1098 	/* Disable interrupts. */
1099 	WRT_REG_DWORD(&reg->ictrl, 0);
1100 	RD_REG_DWORD(&reg->ictrl);
1101 
1102 	/* Shadow registers. */
1103 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1104 	RD_REG_DWORD(&reg->iobase_addr);
1105 	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1106 	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1107 
1108 	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1109 	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1110 
1111 	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1112 	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1113 
1114 	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1115 	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1116 
1117 	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1118 	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1119 
1120 	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1121 	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1122 
1123 	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1124 	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1125 
1126 	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1127 	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1128 
1129 	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1130 	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1131 
1132 	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1133 	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1134 
1135 	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1136 	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1137 
1138 	/* RISC I/O register. */
1139 	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1140 	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1141 
1142 	/* Mailbox registers. */
1143 	mbx_reg = &reg->mailbox0;
1144 	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1145 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1146 
1147 	/* Transfer sequence registers. */
1148 	iter_reg = fw->xseq_gp_reg;
1149 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1150 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1151 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1152 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1153 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1154 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1155 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1156 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1157 
1158 	iter_reg = fw->xseq_0_reg;
1159 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1160 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1161 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1162 
1163 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1164 
1165 	/* Receive sequence registers. */
1166 	iter_reg = fw->rseq_gp_reg;
1167 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1168 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1169 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1170 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1171 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1172 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1173 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1174 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1175 
1176 	iter_reg = fw->rseq_0_reg;
1177 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1178 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1179 
1180 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1181 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1182 
1183 	/* Auxiliary sequence registers. */
1184 	iter_reg = fw->aseq_gp_reg;
1185 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1186 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1187 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1188 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1189 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1190 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1191 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1192 	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1193 
1194 	iter_reg = fw->aseq_0_reg;
1195 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1196 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1197 
1198 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1199 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1200 
1201 	/* Command DMA registers. */
1202 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1203 
1204 	/* Queues. */
1205 	iter_reg = fw->req0_dma_reg;
1206 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1207 	dmp_reg = &reg->iobase_q;
1208 	for (cnt = 0; cnt < 7; cnt++)
1209 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1210 
1211 	iter_reg = fw->resp0_dma_reg;
1212 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1213 	dmp_reg = &reg->iobase_q;
1214 	for (cnt = 0; cnt < 7; cnt++)
1215 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1216 
1217 	iter_reg = fw->req1_dma_reg;
1218 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1219 	dmp_reg = &reg->iobase_q;
1220 	for (cnt = 0; cnt < 7; cnt++)
1221 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1222 
1223 	/* Transmit DMA registers. */
1224 	iter_reg = fw->xmt0_dma_reg;
1225 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1226 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1227 
1228 	iter_reg = fw->xmt1_dma_reg;
1229 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1230 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1231 
1232 	iter_reg = fw->xmt2_dma_reg;
1233 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1234 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1235 
1236 	iter_reg = fw->xmt3_dma_reg;
1237 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1238 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1239 
1240 	iter_reg = fw->xmt4_dma_reg;
1241 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1242 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1243 
1244 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1245 
1246 	/* Receive DMA registers. */
1247 	iter_reg = fw->rcvt0_data_dma_reg;
1248 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1249 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1250 
1251 	iter_reg = fw->rcvt1_data_dma_reg;
1252 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1253 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1254 
1255 	/* RISC registers. */
1256 	iter_reg = fw->risc_gp_reg;
1257 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1258 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1259 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1260 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1261 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1262 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1263 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1264 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1265 
1266 	/* Local memory controller registers. */
1267 	iter_reg = fw->lmc_reg;
1268 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1269 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1270 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1271 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1272 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1273 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1274 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1275 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1276 
1277 	/* Fibre Protocol Module registers. */
1278 	iter_reg = fw->fpm_hdw_reg;
1279 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1280 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1281 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1282 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1283 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1284 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1285 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1286 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1287 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1288 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1289 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1290 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1291 
1292 	/* Frame Buffer registers. */
1293 	iter_reg = fw->fb_hdw_reg;
1294 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1295 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1296 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1297 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1298 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1299 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1300 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1301 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1302 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1303 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1304 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1305 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1306 
1307 	/* Multi queue registers. */
1308 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1309 	    &last_chain);
1310 
1311 	rval = qla24xx_soft_reset(ha);
1312 	if (rval != QLA_SUCCESS)
1313 		goto qla25xx_fw_dump_failed_0;
1314 
1315 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1316 	    &nxt);
1317 	if (rval != QLA_SUCCESS)
1318 		goto qla25xx_fw_dump_failed_0;
1319 
1320 	nxt = qla2xxx_copy_queues(ha, nxt);
1321 
1322 	nxt = qla24xx_copy_eft(ha, nxt);
1323 
1324 	/* Chain entries -- started with MQ. */
1325 	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1326 	if (last_chain) {
1327 		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1328 		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1329 	}
1330 
1331 qla25xx_fw_dump_failed_0:
1332 	qla2xxx_dump_post_process(base_vha, rval);
1333 
1334 qla25xx_fw_dump_failed:
1335 	if (!hardware_locked)
1336 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1337 }
1338 
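/**
 * qla81xx_fw_dump() - Dumps binary data from the ISP81xx firmware.
 * @vha: HA context
 * @hardware_locked: non-zero if the caller already holds the hardware_lock
 */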
1339 void
1340 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1341 {
1342 	int		rval;
1343 	uint32_t	cnt;
1344 	uint32_t	risc_address;
1345 	struct qla_hw_data *ha = vha->hw;
1346 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1347 	uint32_t __iomem *dmp_reg;
1348 	uint32_t	*iter_reg;
1349 	uint16_t __iomem *mbx_reg;
1350 	unsigned long	flags;
1351 	struct qla81xx_fw_dump *fw;
1352 	uint32_t	ext_mem_cnt;
1353 	void		*nxt, *nxt_chain;
1354 	uint32_t	*last_chain = NULL;
1355 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1356 
1357 	risc_address = ext_mem_cnt = 0;
1358 	flags = 0;
1359 
1360 	if (!hardware_locked)
1361 		spin_lock_irqsave(&ha->hardware_lock, flags);
1362 
1363 	if (!ha->fw_dump) {
1364 		ql_log(ql_log_warn, vha, 0xd00a,
1365 		    "No buffer available for dump.\n");
1366 		goto qla81xx_fw_dump_failed;
1367 	}
1368 
1369 	if (ha->fw_dumped) {
1370 		ql_log(ql_log_warn, vha, 0xd00b,
1371 		    "Firmware has been previously dumped (%p) "
1372 		    "-- ignoring request.\n",
1373 		    ha->fw_dump);
1374 		goto qla81xx_fw_dump_failed;
1375 	}
1376 	fw = &ha->fw_dump->isp.isp81;
1377 	qla2xxx_prep_dump(ha, ha->fw_dump);
1378 
1379 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1380 
1381 	/* Pause RISC. */
1382 	rval = qla24xx_pause_risc(reg);
1383 	if (rval != QLA_SUCCESS)
1384 		goto qla81xx_fw_dump_failed_0;
1385 
1386 	/* Host/Risc registers. */
1387 	iter_reg = fw->host_risc_reg;
1388 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1389 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1390 
1391 	/* PCIe registers. */
1392 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1393 	RD_REG_DWORD(&reg->iobase_addr);
1394 	WRT_REG_DWORD(&reg->iobase_window, 0x01);
1395 	dmp_reg = &reg->iobase_c4;
1396 	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
1397 	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
1398 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1399 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1400 
1401 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
1402 	RD_REG_DWORD(&reg->iobase_window);
1403 
1404 	/* Host interface registers. */
1405 	dmp_reg = &reg->flash_addr;
1406 	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1407 		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1408 
1409 	/* Disable interrupts. */
1410 	WRT_REG_DWORD(&reg->ictrl, 0);
1411 	RD_REG_DWORD(&reg->ictrl);
1412 
1413 	/* Shadow registers. */
1414 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1415 	RD_REG_DWORD(&reg->iobase_addr);
1416 	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1417 	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1418 
1419 	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1420 	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1421 
1422 	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1423 	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1424 
1425 	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1426 	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1427 
1428 	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1429 	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1430 
1431 	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1432 	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1433 
1434 	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1435 	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1436 
1437 	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1438 	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1439 
1440 	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1441 	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1442 
1443 	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1444 	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1445 
1446 	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1447 	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1448 
1449 	/* RISC I/O register. */
1450 	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1451 	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1452 
1453 	/* Mailbox registers. */
1454 	mbx_reg = &reg->mailbox0;
1455 	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1456 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1457 
1458 	/* Transfer sequence registers. */
1459 	iter_reg = fw->xseq_gp_reg;
1460 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1461 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1462 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1463 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1464 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1465 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1466 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1467 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1468 
1469 	iter_reg = fw->xseq_0_reg;
1470 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1471 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1472 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1473 
1474 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1475 
1476 	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	nxt = qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

qla81xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla81xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/****************************************************************************/
/*                         Driver Debug Functions.                          */
/****************************************************************************/

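/*
 * Return true when every bit in 'level' is enabled in the
 * ql2xextended_error_logging module parameter.  A value of 1 is treated
 * as shorthand for the default debug mask.
 */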
static inline int
ql_mask_match(uint32_t level)
{
	if (ql2xextended_error_logging == 1)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	return (level & ql2xextended_error_logging) == level;
}

/*
 * This function is for formatting and logging debug information.
 * It is to be used when vha is available. It formats the message
 * and logs it to the messages file.
 * parameters:
 * level: The level of the debug messages to be printed.
 *        If the level is enabled in ql2xextended_error_logging,
 *        this message will appear in the messages file.
 * vha:   Pointer to the scsi_qla_host_t.
 * id:    This is a unique identifier for the level. It identifies the
 *        part of the code from where the message originated.
 * msg:   The message to be displayed.
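 *
 * Example (illustrative only -- ql_dbg_io is one of the level bits and
 * 0x3002 is a message id from the IO tracing range; both are assumptions
 * for this sketch, not values mandated by this function):
 *
 *	ql_dbg(ql_dbg_io, vha, 0x3002,
 *	    "Command completed, handle=0x%x status=0x%x.\n",
 *	    handle, comp_status);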
 */
void
ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <pci-name> <msg-id>:<host> Message */
		pr_warn("%s [%s]-%04x:%ld: %pV",
			QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
			vha->host_no, &vaf);
	} else {
		pr_warn("%s [%s]-%04x: : %pV",
			QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
	}

	va_end(va);
}

/*
 * This function is for formatting and logging debug information.
 * It is to be used when vha is not available and a pci_dev is available,
 * i.e., before host allocation. It formats the message and logs it
 * to the messages file.
 * parameters:
 * level: The level of the debug messages to be printed.
 *        If the level is enabled in ql2xextended_error_logging,
 *        this message will appear in the messages file.
 * pdev:  Pointer to the struct pci_dev.
 * id:    This is a unique id for the level. It identifies the part
 *        of the code from where the message originated.
 * msg:   The message to be displayed.
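 *
 * Example (illustrative only -- ql_dbg_init is one of the level bits and
 * 0x0075 is an id from the init/probe range; both are assumptions for
 * this sketch):
 *
 *	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0075,
 *	    "Unable to allocate memory for response queue.\n");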
 */
void
ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
	   const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (pdev == NULL)
		return;
	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	/* <module-name> <dev-name>:<msg-id> Message */
	pr_warn("%s [%s]-%04x: : %pV",
		QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);

	va_end(va);
}

/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is available. It formats the message
 * and logs it to the messages file. All the messages will be logged
 * irrespective of the value of ql2xextended_error_logging.
 * parameters:
 * level: The level of the log messages to be printed in the
 *        messages file.
 * vha:   Pointer to the scsi_qla_host_t.
 * id:    This is a unique id for the level. It identifies the
 *        part of the code from where the message originated.
 * msg:   The message to be displayed.
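 *
 * Example (illustrative only -- ql_log_warn is one of the log severities
 * and 0x00af is an assumed message id):
 *
 *	ql_log(ql_log_warn, vha, 0x00af,
 *	    "Performing ISP error recovery - ha=%p.\n", ha);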
 */
void
ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (level > ql_errlev)
		return;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <pci-name> <msg-id>:<host> Message */
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
			QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
	} else {
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
			QL_MSGHDR, "0000:00:00.0", id);
	}
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case 0: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case 1:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case 2:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is not available and a pci_dev is available,
 * i.e., before host allocation. It formats the message and logs
 * it to the messages file. All the messages are logged irrespective
 * of the value of ql2xextended_error_logging.
 * parameters:
 * level: The level of the log messages to be printed in the
 *        messages file.
 * pdev:  Pointer to the struct pci_dev.
 * id:    This is a unique id for the level. It identifies the
 *        part of the code from where the message originated.
 * msg:   The message to be displayed.
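 *
 * Example (illustrative only -- ql_log_fatal is one of the log severities
 * and 0x0009 is an assumed message id):
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x0009,
 *	    "Failed to map PCI registers.\n");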
 */
void
ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
	   const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (pdev == NULL)
		return;
	if (level > ql_errlev)
		return;

	/* <module-name> <dev-name>:<msg-id> Message */
	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
		 QL_MSGHDR, dev_name(&(pdev->dev)), id);
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case 0: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case 1:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case 2:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

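/*
 * Dump the first six mailbox registers through ql_dbg() at the given
 * level and message id, selecting the ISP82xx, FWI2-capable (ISP24xx-style)
 * or legacy register layout as appropriate.
 */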
void
ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
{
	int i;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint16_t __iomem *mbx_reg;

	if (!ql_mask_match(level))
		return;

	if (IS_QLA82XX(ha))
		mbx_reg = &reg82->mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha))
		mbx_reg = &reg24->mailbox0;
	else
		mbx_reg = MAILBOX_REG(ha, reg, 0);

	ql_dbg(level, vha, id, "Mailbox registers:\n");
	for (i = 0; i < 6; i++)
		ql_dbg(level, vha, id,
		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}
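/*
 * Hex-dump 'size' bytes starting at 'b' through the debug facility,
 * 16 bytes per line, when 'level' is enabled in the debug mask.
 */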
void
ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
	uint8_t *b, uint32_t size)
{
	uint32_t cnt;
	uint8_t c;

	if (!ql_mask_match(level))
		return;

	ql_dbg(level, vha, id, " 0   1   2   3   4   5   6   7   8   "
	    "9  Ah  Bh  Ch  Dh  Eh  Fh\n");
	ql_dbg(level, vha, id, "----------------------------------"
	    "----------------------------\n");

	ql_dbg(level, vha, id, " ");
	for (cnt = 0; cnt < size;) {
		c = *b++;
		printk("%02x", (uint32_t) c);
		cnt++;
		if (!(cnt % 16))
			printk("\n");
		else
			printk("  ");
	}
	if (cnt % 16)
		ql_dbg(level, vha, id, "\n");
}