1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13 
14 static int qla24xx_vport_disable(struct fc_vport *, bool);
15 
16 /* SYSFS attributes --------------------------------------------------------- */
17 
18 static ssize_t
19 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
20 			   struct bin_attribute *bin_attr,
21 			   char *buf, loff_t off, size_t count)
22 {
23 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24 	    struct device, kobj)));
25 	struct qla_hw_data *ha = vha->hw;
26 	int rval = 0;
27 
28 	if (ha->fw_dump_reading == 0)
29 		return 0;
30 
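	/*
	 * ISP82xx exposes the MiniDump template first, followed by the
	 * captured MiniDump data; offsets past the template fall through
	 * to the dump buffer below.
	 */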
31 	if (IS_QLA82XX(ha)) {
32 		if (off < ha->md_template_size) {
33 			rval = memory_read_from_buffer(buf, count,
34 			    &off, ha->md_tmplt_hdr, ha->md_template_size);
35 			return rval;
36 		}
37 		off -= ha->md_template_size;
38 		rval = memory_read_from_buffer(buf, count,
39 		    &off, ha->md_dump, ha->md_dump_size);
40 		return rval;
41 	} else
42 		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
43 					ha->fw_dump_len);
44 }
45 
46 static ssize_t
47 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
48 			    struct bin_attribute *bin_attr,
49 			    char *buf, loff_t off, size_t count)
50 {
51 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
52 	    struct device, kobj)));
53 	struct qla_hw_data *ha = vha->hw;
54 	int reading;
55 
56 	if (off != 0)
57 		return (0);
58 
59 	reading = simple_strtol(buf, NULL, 10);
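	/*
	 * Control values handled below: 0 -- release a completed dump,
	 * 1 -- expose a captured dump for reading, 2 -- (re)allocate the
	 * dump buffer, 3 -- force a system error (ISP82xx takes reset
	 * ownership instead), 4 -- report MiniDump support (ISP82xx),
	 * 5 -- request an ISP abort (ISP82xx).
	 */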
60 	switch (reading) {
61 	case 0:
62 		if (!ha->fw_dump_reading)
63 			break;
64 
65 		ql_log(ql_log_info, vha, 0x705d,
66 		    "Firmware dump cleared on (%ld).\n", vha->host_no);
67 
68 		if (IS_QLA82XX(vha->hw)) {
69 			qla82xx_md_free(vha);
70 			qla82xx_md_prep(vha);
71 		}
72 		ha->fw_dump_reading = 0;
73 		ha->fw_dumped = 0;
74 		break;
75 	case 1:
76 		if (ha->fw_dumped && !ha->fw_dump_reading) {
77 			ha->fw_dump_reading = 1;
78 
79 			ql_log(ql_log_info, vha, 0x705e,
80 			    "Raw firmware dump ready for read on (%ld).\n",
81 			    vha->host_no);
82 		}
83 		break;
84 	case 2:
85 		qla2x00_alloc_fw_dump(vha);
86 		break;
87 	case 3:
88 		if (IS_QLA82XX(ha)) {
89 			qla82xx_idc_lock(ha);
90 			qla82xx_set_reset_owner(vha);
91 			qla82xx_idc_unlock(ha);
92 		} else
93 			qla2x00_system_error(vha);
94 		break;
95 	case 4:
96 		if (IS_QLA82XX(ha)) {
97 			if (ha->md_tmplt_hdr)
98 				ql_dbg(ql_dbg_user, vha, 0x705b,
99 				    "MiniDump supported with this firmware.\n");
100 			else
101 				ql_dbg(ql_dbg_user, vha, 0x709d,
102 				    "MiniDump not supported with this firmware.\n");
103 		}
104 		break;
105 	case 5:
106 		if (IS_QLA82XX(ha))
107 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
108 		break;
109 	}
110 	return count;
111 }
112 
113 static struct bin_attribute sysfs_fw_dump_attr = {
114 	.attr = {
115 		.name = "fw_dump",
116 		.mode = S_IRUSR | S_IWUSR,
117 	},
118 	.size = 0,
119 	.read = qla2x00_sysfs_read_fw_dump,
120 	.write = qla2x00_sysfs_write_fw_dump,
121 };
122 
123 static ssize_t
124 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
125 			 struct bin_attribute *bin_attr,
126 			 char *buf, loff_t off, size_t count)
127 {
128 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
129 	    struct device, kobj)));
130 	struct qla_hw_data *ha = vha->hw;
131 
132 	if (!capable(CAP_SYS_ADMIN))
133 		return 0;
134 
135 	if (IS_NOCACHE_VPD_TYPE(ha))
136 		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
137 		    ha->nvram_size);
138 	return memory_read_from_buffer(buf, count, &off, ha->nvram,
139 					ha->nvram_size);
140 }
141 
142 static ssize_t
143 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
144 			  struct bin_attribute *bin_attr,
145 			  char *buf, loff_t off, size_t count)
146 {
147 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
148 	    struct device, kobj)));
149 	struct qla_hw_data *ha = vha->hw;
150 	uint16_t	cnt;
151 
152 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
153 	    !ha->isp_ops->write_nvram)
154 		return -EINVAL;
155 
156 	/* Checksum NVRAM. */
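	/*
	 * The last word (byte on legacy parts) holds the two's complement
	 * of the sum of the preceding data, so the whole image sums to zero.
	 */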
157 	if (IS_FWI2_CAPABLE(ha)) {
158 		uint32_t *iter;
159 		uint32_t chksum;
160 
161 		iter = (uint32_t *)buf;
162 		chksum = 0;
163 		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
164 			chksum += le32_to_cpu(*iter++);
165 		chksum = ~chksum + 1;
166 		*iter = cpu_to_le32(chksum);
167 	} else {
168 		uint8_t *iter;
169 		uint8_t chksum;
170 
171 		iter = (uint8_t *)buf;
172 		chksum = 0;
173 		for (cnt = 0; cnt < count - 1; cnt++)
174 			chksum += *iter++;
175 		chksum = ~chksum + 1;
176 		*iter = chksum;
177 	}
178 
179 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
180 		ql_log(ql_log_warn, vha, 0x705f,
181 		    "HBA not online, failing NVRAM update.\n");
182 		return -EAGAIN;
183 	}
184 
185 	/* Write NVRAM. */
186 	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
187 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
188 	    count);
189 
190 	ql_dbg(ql_dbg_user, vha, 0x7060,
191 	    "Setting ISP_ABORT_NEEDED\n");
192 	/* NVRAM settings take effect immediately. */
193 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
194 	qla2xxx_wake_dpc(vha);
195 	qla2x00_wait_for_chip_reset(vha);
196 
197 	return count;
198 }
199 
200 static struct bin_attribute sysfs_nvram_attr = {
201 	.attr = {
202 		.name = "nvram",
203 		.mode = S_IRUSR | S_IWUSR,
204 	},
205 	.size = 512,
206 	.read = qla2x00_sysfs_read_nvram,
207 	.write = qla2x00_sysfs_write_nvram,
208 };
209 
210 static ssize_t
211 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
212 			  struct bin_attribute *bin_attr,
213 			  char *buf, loff_t off, size_t count)
214 {
215 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
216 	    struct device, kobj)));
217 	struct qla_hw_data *ha = vha->hw;
218 
219 	if (ha->optrom_state != QLA_SREADING)
220 		return 0;
221 
222 	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
223 					ha->optrom_region_size);
224 }
225 
226 static ssize_t
227 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
228 			   struct bin_attribute *bin_attr,
229 			   char *buf, loff_t off, size_t count)
230 {
231 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
232 	    struct device, kobj)));
233 	struct qla_hw_data *ha = vha->hw;
234 
235 	if (ha->optrom_state != QLA_SWRITING)
236 		return -EINVAL;
237 	if (off > ha->optrom_region_size)
238 		return -ERANGE;
239 	if (off + count > ha->optrom_region_size)
240 		count = ha->optrom_region_size - off;
241 
242 	memcpy(&ha->optrom_buffer[off], buf, count);
243 
244 	return count;
245 }
246 
247 static struct bin_attribute sysfs_optrom_attr = {
248 	.attr = {
249 		.name = "optrom",
250 		.mode = S_IRUSR | S_IWUSR,
251 	},
252 	.size = 0,
253 	.read = qla2x00_sysfs_read_optrom,
254 	.write = qla2x00_sysfs_write_optrom,
255 };
256 
257 static ssize_t
258 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
259 			       struct bin_attribute *bin_attr,
260 			       char *buf, loff_t off, size_t count)
261 {
262 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
263 	    struct device, kobj)));
264 	struct qla_hw_data *ha = vha->hw;
265 
266 	uint32_t start = 0;
267 	uint32_t size = ha->optrom_size;
268 	int val, valid;
269 
270 	if (off)
271 		return -EINVAL;
272 
273 	if (unlikely(pci_channel_offline(ha->pdev)))
274 		return -EAGAIN;
275 
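	/*
	 * Control string is "<cmd>:<start>:<size>" with start/size in hex:
	 * 0 -- free the staging buffer, 1 -- read a flash region into the
	 * buffer, 2 -- stage a region for update, 3 -- commit the staged
	 * data to flash.
	 */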
276 	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
277 		return -EINVAL;
278 	if (start > ha->optrom_size)
279 		return -EINVAL;
280 
281 	switch (val) {
282 	case 0:
283 		if (ha->optrom_state != QLA_SREADING &&
284 		    ha->optrom_state != QLA_SWRITING)
285 			return -EINVAL;
286 
287 		ha->optrom_state = QLA_SWAITING;
288 
289 		ql_dbg(ql_dbg_user, vha, 0x7061,
290 		    "Freeing flash region allocation -- 0x%x bytes.\n",
291 		    ha->optrom_region_size);
292 
293 		vfree(ha->optrom_buffer);
294 		ha->optrom_buffer = NULL;
295 		break;
296 	case 1:
297 		if (ha->optrom_state != QLA_SWAITING)
298 			return -EINVAL;
299 
300 		ha->optrom_region_start = start;
301 		ha->optrom_region_size = start + size > ha->optrom_size ?
302 		    ha->optrom_size - start : size;
303 
304 		ha->optrom_state = QLA_SREADING;
305 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
306 		if (ha->optrom_buffer == NULL) {
307 			ql_log(ql_log_warn, vha, 0x7062,
308 			    "Unable to allocate memory for optrom retrieval "
309 			    "(%x).\n", ha->optrom_region_size);
310 
311 			ha->optrom_state = QLA_SWAITING;
312 			return -ENOMEM;
313 		}
314 
315 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
316 			ql_log(ql_log_warn, vha, 0x7063,
317 			    "HBA not online, failing flash read.\n");
318 			return -EAGAIN;
319 		}
320 
321 		ql_dbg(ql_dbg_user, vha, 0x7064,
322 		    "Reading flash region -- 0x%x/0x%x.\n",
323 		    ha->optrom_region_start, ha->optrom_region_size);
324 
325 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
326 		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
327 		    ha->optrom_region_start, ha->optrom_region_size);
328 		break;
329 	case 2:
330 		if (ha->optrom_state != QLA_SWAITING)
331 			return -EINVAL;
332 
333 		/*
334 		 * We need to be more restrictive on which FLASH regions are
335 		 * allowed to be updated via user-space.  Regions accessible
336 		 * via this method include:
337 		 *
338 		 * ISP21xx/ISP22xx/ISP23xx type boards:
339 		 *
340 		 * 	0x000000 -> 0x020000 -- Boot code.
341 		 *
342 		 * ISP2322/ISP24xx type boards:
343 		 *
344 		 * 	0x000000 -> 0x07ffff -- Boot code.
345 		 * 	0x080000 -> 0x0fffff -- Firmware.
346 		 *
347 		 * ISP25xx type boards:
348 		 *
349 		 * 	0x000000 -> 0x07ffff -- Boot code.
350 		 * 	0x080000 -> 0x0fffff -- Firmware.
351 		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
352 		 */
353 		valid = 0;
354 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
355 			valid = 1;
356 		else if (start == (ha->flt_region_boot * 4) ||
357 		    start == (ha->flt_region_fw * 4))
358 			valid = 1;
359 		else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
360 			valid = 1;
361 		if (!valid) {
362 			ql_log(ql_log_warn, vha, 0x7065,
363 			    "Invalid start region 0x%x/0x%x.\n", start, size);
364 			return -EINVAL;
365 		}
366 
367 		ha->optrom_region_start = start;
368 		ha->optrom_region_size = start + size > ha->optrom_size ?
369 		    ha->optrom_size - start : size;
370 
371 		ha->optrom_state = QLA_SWRITING;
372 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
373 		if (ha->optrom_buffer == NULL) {
374 			ql_log(ql_log_warn, vha, 0x7066,
375 			    "Unable to allocate memory for optrom update "
376 			    "(%x)\n", ha->optrom_region_size);
377 
378 			ha->optrom_state = QLA_SWAITING;
379 			return -ENOMEM;
380 		}
381 
382 		ql_dbg(ql_dbg_user, vha, 0x7067,
383 		    "Staging flash region write -- 0x%x/0x%x.\n",
384 		    ha->optrom_region_start, ha->optrom_region_size);
385 
386 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
387 		break;
388 	case 3:
389 		if (ha->optrom_state != QLA_SWRITING)
390 			return -EINVAL;
391 
392 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
393 			ql_log(ql_log_warn, vha, 0x7068,
394 			    "HBA not online, failing flash update.\n");
395 			return -EAGAIN;
396 		}
397 
398 		ql_dbg(ql_dbg_user, vha, 0x7069,
399 		    "Writing flash region -- 0x%x/0x%x.\n",
400 		    ha->optrom_region_start, ha->optrom_region_size);
401 
402 		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
403 		    ha->optrom_region_start, ha->optrom_region_size);
404 		break;
405 	default:
406 		return -EINVAL;
407 	}
408 	return count;
409 }
410 
411 static struct bin_attribute sysfs_optrom_ctl_attr = {
412 	.attr = {
413 		.name = "optrom_ctl",
414 		.mode = S_IWUSR,
415 	},
416 	.size = 0,
417 	.write = qla2x00_sysfs_write_optrom_ctl,
418 };
419 
420 static ssize_t
421 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
422 		       struct bin_attribute *bin_attr,
423 		       char *buf, loff_t off, size_t count)
424 {
425 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
426 	    struct device, kobj)));
427 	struct qla_hw_data *ha = vha->hw;
428 
429 	if (unlikely(pci_channel_offline(ha->pdev)))
430 		return -EAGAIN;
431 
432 	if (!capable(CAP_SYS_ADMIN))
433 		return -EINVAL;
434 
435 	if (IS_NOCACHE_VPD_TYPE(ha))
436 		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
437 		    ha->vpd_size);
438 	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
439 }
440 
441 static ssize_t
442 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
443 			struct bin_attribute *bin_attr,
444 			char *buf, loff_t off, size_t count)
445 {
446 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
447 	    struct device, kobj)));
448 	struct qla_hw_data *ha = vha->hw;
449 	uint8_t *tmp_data;
450 
451 	if (unlikely(pci_channel_offline(ha->pdev)))
452 		return 0;
453 
454 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
455 	    !ha->isp_ops->write_nvram)
456 		return 0;
457 
458 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
459 		ql_log(ql_log_warn, vha, 0x706a,
460 		    "HBA not online, failing VPD update.\n");
461 		return -EAGAIN;
462 	}
463 
464 	/* Write NVRAM. */
465 	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
466 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
467 
468 	/* Update flash version information for 4Gb & above. */
469 	if (!IS_FWI2_CAPABLE(ha))
470 		return -EINVAL;
471 
472 	tmp_data = vmalloc(256);
473 	if (!tmp_data) {
474 		ql_log(ql_log_warn, vha, 0x706b,
475 		    "Unable to allocate memory for VPD information update.\n");
476 		return -ENOMEM;
477 	}
478 	ha->isp_ops->get_flash_version(vha, tmp_data);
479 	vfree(tmp_data);
480 
481 	return count;
482 }
483 
484 static struct bin_attribute sysfs_vpd_attr = {
485 	.attr = {
486 		.name = "vpd",
487 		.mode = S_IRUSR | S_IWUSR,
488 	},
489 	.size = 0,
490 	.read = qla2x00_sysfs_read_vpd,
491 	.write = qla2x00_sysfs_write_vpd,
492 };
493 
494 static ssize_t
495 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
496 		       struct bin_attribute *bin_attr,
497 		       char *buf, loff_t off, size_t count)
498 {
499 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
500 	    struct device, kobj)));
501 	struct qla_hw_data *ha = vha->hw;
502 	uint16_t iter, addr, offset;
503 	int rval;
504 
505 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
506 		return 0;
507 
508 	if (ha->sfp_data)
509 		goto do_read;
510 
511 	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
512 	    &ha->sfp_data_dma);
513 	if (!ha->sfp_data) {
514 		ql_log(ql_log_warn, vha, 0x706c,
515 		    "Unable to allocate memory for SFP read-data.\n");
516 		return 0;
517 	}
518 
519 do_read:
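	/*
	 * Read the SFP map in SFP_BLOCK_SIZE chunks: the first half from
	 * two-wire address 0xa0, the second half from 0xa2 (presumably the
	 * SFF-8472 base ID and diagnostic pages).
	 */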
520 	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
521 	addr = 0xa0;
522 	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
523 	    iter++, offset += SFP_BLOCK_SIZE) {
524 		if (iter == 4) {
525 			/* Skip to next device address. */
526 			addr = 0xa2;
527 			offset = 0;
528 		}
529 
530 		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
531 		    addr, offset, SFP_BLOCK_SIZE, 0);
532 		if (rval != QLA_SUCCESS) {
533 			ql_log(ql_log_warn, vha, 0x706d,
534 			    "Unable to read SFP data (%x/%x/%x).\n", rval,
535 			    addr, offset);
536 
537 			return -EIO;
538 		}
539 		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
540 		buf += SFP_BLOCK_SIZE;
541 	}
542 
543 	return count;
544 }
545 
546 static struct bin_attribute sysfs_sfp_attr = {
547 	.attr = {
548 		.name = "sfp",
549 		.mode = S_IRUSR | S_IWUSR,
550 	},
551 	.size = SFP_DEV_SIZE * 2,
552 	.read = qla2x00_sysfs_read_sfp,
553 };
554 
555 static ssize_t
556 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
557 			struct bin_attribute *bin_attr,
558 			char *buf, loff_t off, size_t count)
559 {
560 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
561 	    struct device, kobj)));
562 	struct qla_hw_data *ha = vha->hw;
563 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
564 	int type;
565 
566 	if (off != 0)
567 		return -EINVAL;
568 
569 	type = simple_strtol(buf, NULL, 10);
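	/*
	 * Reset codes handled below: 0x2025c -- full ISP reset,
	 * 0x2025d -- MPI firmware reset (ISP81xx only),
	 * 0x2025e -- FCoE context reset (ISP82xx base port only).
	 */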
570 	switch (type) {
571 	case 0x2025c:
572 		ql_log(ql_log_info, vha, 0x706e,
573 		    "Issuing ISP reset.\n");
574 
575 		scsi_block_requests(vha->host);
576 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
577 		if (IS_QLA82XX(ha)) {
578 			qla82xx_idc_lock(ha);
579 			qla82xx_set_reset_owner(vha);
580 			qla82xx_idc_unlock(ha);
581 		}
582 		qla2xxx_wake_dpc(vha);
583 		qla2x00_wait_for_chip_reset(vha);
584 		scsi_unblock_requests(vha->host);
585 		break;
586 	case 0x2025d:
587 		if (!IS_QLA81XX(ha))
588 			return -EPERM;
589 
590 		ql_log(ql_log_info, vha, 0x706f,
591 		    "Issuing MPI reset.\n");
592 
593 		/* Make sure FC side is not in reset */
594 		qla2x00_wait_for_hba_online(vha);
595 
596 		/* Issue MPI reset */
597 		scsi_block_requests(vha->host);
598 		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
599 			ql_log(ql_log_warn, vha, 0x7070,
600 			    "MPI reset failed.\n");
601 		scsi_unblock_requests(vha->host);
602 		break;
603 	case 0x2025e:
604 		if (!IS_QLA82XX(ha) || vha != base_vha) {
605 			ql_log(ql_log_info, vha, 0x7071,
606 			    "FCoE ctx reset not supported.\n");
607 			return -EPERM;
608 		}
609 
610 		ql_log(ql_log_info, vha, 0x7072,
611 		    "Issuing FCoE ctx reset.\n");
612 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
613 		qla2xxx_wake_dpc(vha);
614 		qla2x00_wait_for_fcoe_ctx_reset(vha);
615 		break;
616 	}
617 	return count;
618 }
619 
620 static struct bin_attribute sysfs_reset_attr = {
621 	.attr = {
622 		.name = "reset",
623 		.mode = S_IWUSR,
624 	},
625 	.size = 0,
626 	.write = qla2x00_sysfs_write_reset,
627 };
628 
629 static ssize_t
630 qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
631 			struct bin_attribute *bin_attr,
632 			char *buf, loff_t off, size_t count)
633 {
634 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
635 	    struct device, kobj)));
636 	struct qla_hw_data *ha = vha->hw;
637 	uint16_t dev, adr, opt, len;
638 	int rval;
639 
640 	ha->edc_data_len = 0;
641 
642 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
643 		return -EINVAL;
644 
645 	if (!ha->edc_data) {
646 		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
647 		    &ha->edc_data_dma);
648 		if (!ha->edc_data) {
649 			ql_log(ql_log_warn, vha, 0x7073,
650 			    "Unable to allocate memory for EDC write.\n");
651 			return -ENOMEM;
652 		}
653 	}
654 
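	/*
	 * Input layout: four little-endian 16-bit words -- device address,
	 * data address, options and length -- followed by 'len' bytes of
	 * write data at offset 8.
	 */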
655 	dev = le16_to_cpup((void *)&buf[0]);
656 	adr = le16_to_cpup((void *)&buf[2]);
657 	opt = le16_to_cpup((void *)&buf[4]);
658 	len = le16_to_cpup((void *)&buf[6]);
659 
660 	if (!(opt & BIT_0))
661 		if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
662 			return -EINVAL;
663 
664 	memcpy(ha->edc_data, &buf[8], len);
665 
666 	rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
667 	    dev, adr, len, opt);
668 	if (rval != QLA_SUCCESS) {
669 		ql_log(ql_log_warn, vha, 0x7074,
670 		    "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
671 		    rval, dev, adr, opt, len, buf[8]);
672 		return -EIO;
673 	}
674 
675 	return count;
676 }
677 
678 static struct bin_attribute sysfs_edc_attr = {
679 	.attr = {
680 		.name = "edc",
681 		.mode = S_IWUSR,
682 	},
683 	.size = 0,
684 	.write = qla2x00_sysfs_write_edc,
685 };
686 
687 static ssize_t
688 qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
689 			struct bin_attribute *bin_attr,
690 			char *buf, loff_t off, size_t count)
691 {
692 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
693 	    struct device, kobj)));
694 	struct qla_hw_data *ha = vha->hw;
695 	uint16_t dev, adr, opt, len;
696 	int rval;
697 
698 	ha->edc_data_len = 0;
699 
700 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
701 		return -EINVAL;
702 
703 	if (!ha->edc_data) {
704 		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
705 		    &ha->edc_data_dma);
706 		if (!ha->edc_data) {
707 			ql_log(ql_log_warn, vha, 0x708c,
708 			    "Unable to allocate memory for EDC status.\n");
709 			return -ENOMEM;
710 		}
711 	}
712 
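	/* Same 8-byte header as the "edc" write, but with no data payload. */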
713 	dev = le16_to_cpup((void *)&buf[0]);
714 	adr = le16_to_cpup((void *)&buf[2]);
715 	opt = le16_to_cpup((void *)&buf[4]);
716 	len = le16_to_cpup((void *)&buf[6]);
717 
718 	if (!(opt & BIT_0))
719 		if (len == 0 || len > DMA_POOL_SIZE)
720 			return -EINVAL;
721 
722 	memset(ha->edc_data, 0, len);
723 	rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
724 			dev, adr, len, opt);
725 	if (rval != QLA_SUCCESS) {
726 		ql_log(ql_log_info, vha, 0x7075,
727 		    "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
728 		    rval, dev, adr, opt, len);
729 		return -EIO;
730 	}
731 
732 	ha->edc_data_len = len;
733 
734 	return count;
735 }
736 
737 static ssize_t
738 qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
739 			   struct bin_attribute *bin_attr,
740 			   char *buf, loff_t off, size_t count)
741 {
742 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
743 	    struct device, kobj)));
744 	struct qla_hw_data *ha = vha->hw;
745 
746 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
747 		return 0;
748 
749 	if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
750 		return -EINVAL;
751 
752 	memcpy(buf, ha->edc_data, ha->edc_data_len);
753 
754 	return ha->edc_data_len;
755 }
756 
757 static struct bin_attribute sysfs_edc_status_attr = {
758 	.attr = {
759 		.name = "edc_status",
760 		.mode = S_IRUSR | S_IWUSR,
761 	},
762 	.size = 0,
763 	.write = qla2x00_sysfs_write_edc_status,
764 	.read = qla2x00_sysfs_read_edc_status,
765 };
766 
767 static ssize_t
768 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
769 		       struct bin_attribute *bin_attr,
770 		       char *buf, loff_t off, size_t count)
771 {
772 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
773 	    struct device, kobj)));
774 	struct qla_hw_data *ha = vha->hw;
775 	int rval;
776 	uint16_t actual_size;
777 
778 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
779 		return 0;
780 
781 	if (ha->xgmac_data)
782 		goto do_read;
783 
784 	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
785 	    &ha->xgmac_data_dma, GFP_KERNEL);
786 	if (!ha->xgmac_data) {
787 		ql_log(ql_log_warn, vha, 0x7076,
788 		    "Unable to allocate memory for XGMAC read-data.\n");
789 		return 0;
790 	}
791 
792 do_read:
793 	actual_size = 0;
794 	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
795 
796 	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
797 	    XGMAC_DATA_SIZE, &actual_size);
798 	if (rval != QLA_SUCCESS) {
799 		ql_log(ql_log_warn, vha, 0x7077,
800 		    "Unable to read XGMAC data (%x).\n", rval);
801 		count = 0;
802 	}
803 
804 	count = actual_size > count ? count: actual_size;
805 	memcpy(buf, ha->xgmac_data, count);
806 
807 	return count;
808 }
809 
810 static struct bin_attribute sysfs_xgmac_stats_attr = {
811 	.attr = {
812 		.name = "xgmac_stats",
813 		.mode = S_IRUSR,
814 	},
815 	.size = 0,
816 	.read = qla2x00_sysfs_read_xgmac_stats,
817 };
818 
819 static ssize_t
820 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
821 		       struct bin_attribute *bin_attr,
822 		       char *buf, loff_t off, size_t count)
823 {
824 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
825 	    struct device, kobj)));
826 	struct qla_hw_data *ha = vha->hw;
827 	int rval;
828 	uint16_t actual_size;
829 
830 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
831 		return 0;
832 
833 	if (ha->dcbx_tlv)
834 		goto do_read;
835 
836 	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
837 	    &ha->dcbx_tlv_dma, GFP_KERNEL);
838 	if (!ha->dcbx_tlv) {
839 		ql_log(ql_log_warn, vha, 0x7078,
840 		    "Unable to allocate memory for DCBX TLV read-data.\n");
841 		return -ENOMEM;
842 	}
843 
844 do_read:
845 	actual_size = 0;
846 	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
847 
848 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
849 	    DCBX_TLV_DATA_SIZE);
850 	if (rval != QLA_SUCCESS) {
851 		ql_log(ql_log_warn, vha, 0x7079,
852 		    "Unable to read DCBX TLV (%x).\n", rval);
853 		return -EIO;
854 	}
855 
856 	memcpy(buf, ha->dcbx_tlv, count);
857 
858 	return count;
859 }
860 
861 static struct bin_attribute sysfs_dcbx_tlv_attr = {
862 	.attr = {
863 		.name = "dcbx_tlv",
864 		.mode = S_IRUSR,
865 	},
866 	.size = 0,
867 	.read = qla2x00_sysfs_read_dcbx_tlv,
868 };
869 
870 static struct sysfs_entry {
871 	char *name;
872 	struct bin_attribute *attr;
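	/* 0 -- all ISPs, 1 -- FWI2-capable only, 2 -- ISP25xx, 3 -- ISP8xxx. */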
873 	int is4GBp_only;
874 } bin_file_entries[] = {
875 	{ "fw_dump", &sysfs_fw_dump_attr, },
876 	{ "nvram", &sysfs_nvram_attr, },
877 	{ "optrom", &sysfs_optrom_attr, },
878 	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
879 	{ "vpd", &sysfs_vpd_attr, 1 },
880 	{ "sfp", &sysfs_sfp_attr, 1 },
881 	{ "reset", &sysfs_reset_attr, },
882 	{ "edc", &sysfs_edc_attr, 2 },
883 	{ "edc_status", &sysfs_edc_status_attr, 2 },
884 	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
885 	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
886 	{ NULL },
887 };
888 
889 void
890 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
891 {
892 	struct Scsi_Host *host = vha->host;
893 	struct sysfs_entry *iter;
894 	int ret;
895 
896 	for (iter = bin_file_entries; iter->name; iter++) {
897 		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
898 			continue;
899 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
900 			continue;
901 		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
902 			continue;
903 
904 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
905 		    iter->attr);
906 		if (ret)
907 			ql_log(ql_log_warn, vha, 0x00f3,
908 			    "Unable to create sysfs %s binary attribute (%d).\n",
909 			    iter->name, ret);
910 		else
911 			ql_dbg(ql_dbg_init, vha, 0x00f4,
912 			    "Successfully created sysfs %s binary attribute.\n",
913 			    iter->name);
914 	}
915 }
916 
917 void
918 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
919 {
920 	struct Scsi_Host *host = vha->host;
921 	struct sysfs_entry *iter;
922 	struct qla_hw_data *ha = vha->hw;
923 
924 	for (iter = bin_file_entries; iter->name; iter++) {
925 		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
926 			continue;
927 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
928 			continue;
929 		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
930 			continue;
931 
932 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
933 		    iter->attr);
934 	}
935 
936 	if (ha->beacon_blink_led == 1)
937 		ha->isp_ops->beacon_off(vha);
938 }
939 
940 /* Scsi_Host attributes. */
941 
942 static ssize_t
943 qla2x00_drvr_version_show(struct device *dev,
944 			  struct device_attribute *attr, char *buf)
945 {
946 	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
947 }
948 
949 static ssize_t
950 qla2x00_fw_version_show(struct device *dev,
951 			struct device_attribute *attr, char *buf)
952 {
953 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
954 	struct qla_hw_data *ha = vha->hw;
955 	char fw_str[128];
956 
957 	return snprintf(buf, PAGE_SIZE, "%s\n",
958 	    ha->isp_ops->fw_version_str(vha, fw_str));
959 }
960 
961 static ssize_t
962 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
963 			char *buf)
964 {
965 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
966 	struct qla_hw_data *ha = vha->hw;
967 	uint32_t sn;
968 
969 	if (IS_FWI2_CAPABLE(ha)) {
970 		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
971 		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
972 	}
973 
974 	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
975 	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
976 	    sn % 100000);
977 }
978 
979 static ssize_t
980 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
981 		      char *buf)
982 {
983 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
984 	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
985 }
986 
987 static ssize_t
988 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
989 		    char *buf)
990 {
991 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
992 	struct qla_hw_data *ha = vha->hw;
993 	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
994 	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
995 	    ha->product_id[3]);
996 }
997 
998 static ssize_t
999 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1000 			char *buf)
1001 {
1002 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1003 	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1004 }
1005 
1006 static ssize_t
1007 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1008 			char *buf)
1009 {
1010 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1011 	return snprintf(buf, PAGE_SIZE, "%s\n",
1012 	    vha->hw->model_desc ? vha->hw->model_desc : "");
1013 }
1014 
1015 static ssize_t
1016 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1017 		      char *buf)
1018 {
1019 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1020 	char pci_info[30];
1021 
1022 	return snprintf(buf, PAGE_SIZE, "%s\n",
1023 	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
1024 }
1025 
1026 static ssize_t
1027 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1028 			char *buf)
1029 {
1030 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1031 	struct qla_hw_data *ha = vha->hw;
1032 	int len = 0;
1033 
1034 	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1035 	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
1036 	    vha->device_flags & DFLG_NO_CABLE)
1037 		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
1038 	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1039 	    qla2x00_reset_active(vha))
1040 		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1041 	else {
1042 		len = snprintf(buf, PAGE_SIZE, "Link Up - ");
1043 
1044 		switch (ha->current_topology) {
1045 		case ISP_CFG_NL:
1046 			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1047 			break;
1048 		case ISP_CFG_FL:
1049 			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1050 			break;
1051 		case ISP_CFG_N:
1052 			len += snprintf(buf + len, PAGE_SIZE-len,
1053 			    "N_Port to N_Port\n");
1054 			break;
1055 		case ISP_CFG_F:
1056 			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1057 			break;
1058 		default:
1059 			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1060 			break;
1061 		}
1062 	}
1063 	return len;
1064 }
1065 
1066 static ssize_t
1067 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1068 		 char *buf)
1069 {
1070 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1071 	int len = 0;
1072 
1073 	switch (vha->hw->zio_mode) {
1074 	case QLA_ZIO_MODE_6:
1075 		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1076 		break;
1077 	case QLA_ZIO_DISABLED:
1078 		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1079 		break;
1080 	}
1081 	return len;
1082 }
1083 
1084 static ssize_t
1085 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1086 		  const char *buf, size_t count)
1087 {
1088 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1089 	struct qla_hw_data *ha = vha->hw;
1090 	int val = 0;
1091 	uint16_t zio_mode;
1092 
1093 	if (!IS_ZIO_SUPPORTED(ha))
1094 		return -ENOTSUPP;
1095 
1096 	if (sscanf(buf, "%d", &val) != 1)
1097 		return -EINVAL;
1098 
1099 	if (val)
1100 		zio_mode = QLA_ZIO_MODE_6;
1101 	else
1102 		zio_mode = QLA_ZIO_DISABLED;
1103 
1104 	/* Update per-hba values and queue a reset. */
1105 	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1106 		ha->zio_mode = zio_mode;
1107 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1108 	}
1109 	return strlen(buf);
1110 }
1111 
1112 static ssize_t
1113 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1114 		       char *buf)
1115 {
1116 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1117 
1118 	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1119 }
1120 
1121 static ssize_t
1122 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1123 			const char *buf, size_t count)
1124 {
1125 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1126 	int val = 0;
1127 	uint16_t zio_timer;
1128 
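	/* Value is given in microseconds; hardware stores 100us increments. */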
1129 	if (sscanf(buf, "%d", &val) != 1)
1130 		return -EINVAL;
1131 	if (val > 25500 || val < 100)
1132 		return -ERANGE;
1133 
1134 	zio_timer = (uint16_t)(val / 100);
1135 	vha->hw->zio_timer = zio_timer;
1136 
1137 	return strlen(buf);
1138 }
1139 
1140 static ssize_t
1141 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1142 		    char *buf)
1143 {
1144 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1145 	int len = 0;
1146 
1147 	if (vha->hw->beacon_blink_led)
1148 		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1149 	else
1150 		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1151 	return len;
1152 }
1153 
1154 static ssize_t
1155 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1156 		     const char *buf, size_t count)
1157 {
1158 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1159 	struct qla_hw_data *ha = vha->hw;
1160 	int val = 0;
1161 	int rval;
1162 
1163 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1164 		return -EPERM;
1165 
1166 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1167 		ql_log(ql_log_warn, vha, 0x707a,
1168 		    "Abort ISP active -- ignoring beacon request.\n");
1169 		return -EBUSY;
1170 	}
1171 
1172 	if (sscanf(buf, "%d", &val) != 1)
1173 		return -EINVAL;
1174 
1175 	if (val)
1176 		rval = ha->isp_ops->beacon_on(vha);
1177 	else
1178 		rval = ha->isp_ops->beacon_off(vha);
1179 
1180 	if (rval != QLA_SUCCESS)
1181 		count = 0;
1182 
1183 	return count;
1184 }
1185 
1186 static ssize_t
1187 qla2x00_optrom_bios_version_show(struct device *dev,
1188 				 struct device_attribute *attr, char *buf)
1189 {
1190 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1191 	struct qla_hw_data *ha = vha->hw;
1192 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1193 	    ha->bios_revision[0]);
1194 }
1195 
1196 static ssize_t
1197 qla2x00_optrom_efi_version_show(struct device *dev,
1198 				struct device_attribute *attr, char *buf)
1199 {
1200 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1201 	struct qla_hw_data *ha = vha->hw;
1202 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1203 	    ha->efi_revision[0]);
1204 }
1205 
1206 static ssize_t
1207 qla2x00_optrom_fcode_version_show(struct device *dev,
1208 				  struct device_attribute *attr, char *buf)
1209 {
1210 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1211 	struct qla_hw_data *ha = vha->hw;
1212 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1213 	    ha->fcode_revision[0]);
1214 }
1215 
1216 static ssize_t
1217 qla2x00_optrom_fw_version_show(struct device *dev,
1218 			       struct device_attribute *attr, char *buf)
1219 {
1220 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1221 	struct qla_hw_data *ha = vha->hw;
1222 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1223 	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1224 	    ha->fw_revision[3]);
1225 }
1226 
1227 static ssize_t
1228 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1229     struct device_attribute *attr, char *buf)
1230 {
1231 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232 	struct qla_hw_data *ha = vha->hw;
1233 
1234 	if (!IS_QLA81XX(ha))
1235 		return snprintf(buf, PAGE_SIZE, "\n");
1236 
1237 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1238 	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1239 	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1240 }
1241 
1242 static ssize_t
1243 qla2x00_total_isp_aborts_show(struct device *dev,
1244 			      struct device_attribute *attr, char *buf)
1245 {
1246 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1247 	struct qla_hw_data *ha = vha->hw;
1248 	return snprintf(buf, PAGE_SIZE, "%d\n",
1249 	    ha->qla_stats.total_isp_aborts);
1250 }
1251 
1252 static ssize_t
1253 qla24xx_84xx_fw_version_show(struct device *dev,
1254 	struct device_attribute *attr, char *buf)
1255 {
1256 	int rval = QLA_SUCCESS;
1257 	uint16_t status[2] = {0, 0};
1258 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1259 	struct qla_hw_data *ha = vha->hw;
1260 
1261 	if (!IS_QLA84XX(ha))
1262 		return snprintf(buf, PAGE_SIZE, "\n");
1263 
1264 	if (ha->cs84xx->op_fw_version == 0)
1265 		rval = qla84xx_verify_chip(vha, status);
1266 
1267 	if ((rval == QLA_SUCCESS) && (status[0] == 0))
1268 		return snprintf(buf, PAGE_SIZE, "%u\n",
1269 			(uint32_t)ha->cs84xx->op_fw_version);
1270 
1271 	return snprintf(buf, PAGE_SIZE, "\n");
1272 }
1273 
1274 static ssize_t
1275 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1276     char *buf)
1277 {
1278 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1279 	struct qla_hw_data *ha = vha->hw;
1280 
1281 	if (!IS_QLA81XX(ha))
1282 		return snprintf(buf, PAGE_SIZE, "\n");
1283 
1284 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1285 	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1286 	    ha->mpi_capabilities);
1287 }
1288 
1289 static ssize_t
1290 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1291     char *buf)
1292 {
1293 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1294 	struct qla_hw_data *ha = vha->hw;
1295 
1296 	if (!IS_QLA81XX(ha))
1297 		return snprintf(buf, PAGE_SIZE, "\n");
1298 
1299 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1300 	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1301 }
1302 
1303 static ssize_t
1304 qla2x00_flash_block_size_show(struct device *dev,
1305 			      struct device_attribute *attr, char *buf)
1306 {
1307 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1308 	struct qla_hw_data *ha = vha->hw;
1309 
1310 	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1311 }
1312 
1313 static ssize_t
1314 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1315     char *buf)
1316 {
1317 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1318 
1319 	if (!IS_QLA8XXX_TYPE(vha->hw))
1320 		return snprintf(buf, PAGE_SIZE, "\n");
1321 
1322 	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1323 }
1324 
1325 static ssize_t
1326 qla2x00_vn_port_mac_address_show(struct device *dev,
1327     struct device_attribute *attr, char *buf)
1328 {
1329 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1330 
1331 	if (!IS_QLA8XXX_TYPE(vha->hw))
1332 		return snprintf(buf, PAGE_SIZE, "\n");
1333 
1334 	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1335 	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1336 	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1337 	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1338 }
1339 
1340 static ssize_t
1341 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1342     char *buf)
1343 {
1344 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1345 
1346 	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1347 }
1348 
1349 static ssize_t
1350 qla2x00_thermal_temp_show(struct device *dev,
1351 	struct device_attribute *attr, char *buf)
1352 {
1353 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1354 	int rval = QLA_FUNCTION_FAILED;
1355 	uint16_t temp, frac;
1356 
1357 	if (!vha->hw->flags.thermal_supported)
1358 		return snprintf(buf, PAGE_SIZE, "\n");
1359 
1360 	temp = frac = 0;
1361 	if (qla2x00_reset_active(vha))
1362 		ql_log(ql_log_warn, vha, 0x707b,
1363 		    "ISP reset active.\n");
1364 	else if (!vha->hw->flags.eeh_busy)
1365 		rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1366 	if (rval != QLA_SUCCESS)
1367 		temp = frac = 0;
1368 
1369 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
1370 }
1371 
1372 static ssize_t
1373 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1374     char *buf)
1375 {
1376 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1377 	int rval = QLA_FUNCTION_FAILED;
1378 	uint16_t state[5];
1379 
1380 	if (qla2x00_reset_active(vha))
1381 		ql_log(ql_log_warn, vha, 0x707c,
1382 		    "ISP reset active.\n");
1383 	else if (!vha->hw->flags.eeh_busy)
1384 		rval = qla2x00_get_firmware_state(vha, state);
1385 	if (rval != QLA_SUCCESS)
1386 		memset(state, -1, sizeof(state));
1387 
1388 	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1389 	    state[1], state[2], state[3], state[4]);
1390 }
1391 
1392 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1393 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1394 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1395 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1396 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1397 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1398 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1399 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1400 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1401 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1402 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1403 		   qla2x00_zio_timer_store);
1404 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1405 		   qla2x00_beacon_store);
1406 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1407 		   qla2x00_optrom_bios_version_show, NULL);
1408 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1409 		   qla2x00_optrom_efi_version_show, NULL);
1410 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1411 		   qla2x00_optrom_fcode_version_show, NULL);
1412 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1413 		   NULL);
1414 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
1415     qla2x00_optrom_gold_fw_version_show, NULL);
1416 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1417 		   NULL);
1418 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1419 		   NULL);
1420 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1421 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1422 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1423 		   NULL);
1424 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1425 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1426 		   qla2x00_vn_port_mac_address_show, NULL);
1427 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1428 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1429 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1430 
1431 struct device_attribute *qla2x00_host_attrs[] = {
1432 	&dev_attr_driver_version,
1433 	&dev_attr_fw_version,
1434 	&dev_attr_serial_num,
1435 	&dev_attr_isp_name,
1436 	&dev_attr_isp_id,
1437 	&dev_attr_model_name,
1438 	&dev_attr_model_desc,
1439 	&dev_attr_pci_info,
1440 	&dev_attr_link_state,
1441 	&dev_attr_zio,
1442 	&dev_attr_zio_timer,
1443 	&dev_attr_beacon,
1444 	&dev_attr_optrom_bios_version,
1445 	&dev_attr_optrom_efi_version,
1446 	&dev_attr_optrom_fcode_version,
1447 	&dev_attr_optrom_fw_version,
1448 	&dev_attr_84xx_fw_version,
1449 	&dev_attr_total_isp_aborts,
1450 	&dev_attr_mpi_version,
1451 	&dev_attr_phy_version,
1452 	&dev_attr_flash_block_size,
1453 	&dev_attr_vlan_id,
1454 	&dev_attr_vn_port_mac_address,
1455 	&dev_attr_fabric_param,
1456 	&dev_attr_fw_state,
1457 	&dev_attr_optrom_gold_fw_version,
1458 	&dev_attr_thermal_temp,
1459 	NULL,
1460 };
1461 
1462 /* Host attributes. */
1463 
1464 static void
1465 qla2x00_get_host_port_id(struct Scsi_Host *shost)
1466 {
1467 	scsi_qla_host_t *vha = shost_priv(shost);
1468 
1469 	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1470 	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1471 }
1472 
1473 static void
1474 qla2x00_get_host_speed(struct Scsi_Host *shost)
1475 {
1476 	struct qla_hw_data *ha = ((struct scsi_qla_host *)
1477 					(shost_priv(shost)))->hw;
1478 	u32 speed = FC_PORTSPEED_UNKNOWN;
1479 
1480 	switch (ha->link_data_rate) {
1481 	case PORT_SPEED_1GB:
1482 		speed = FC_PORTSPEED_1GBIT;
1483 		break;
1484 	case PORT_SPEED_2GB:
1485 		speed = FC_PORTSPEED_2GBIT;
1486 		break;
1487 	case PORT_SPEED_4GB:
1488 		speed = FC_PORTSPEED_4GBIT;
1489 		break;
1490 	case PORT_SPEED_8GB:
1491 		speed = FC_PORTSPEED_8GBIT;
1492 		break;
1493 	case PORT_SPEED_10GB:
1494 		speed = FC_PORTSPEED_10GBIT;
1495 		break;
1496 	}
1497 	fc_host_speed(shost) = speed;
1498 }
1499 
1500 static void
1501 qla2x00_get_host_port_type(struct Scsi_Host *shost)
1502 {
1503 	scsi_qla_host_t *vha = shost_priv(shost);
1504 	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1505 
1506 	if (vha->vp_idx) {
1507 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1508 		return;
1509 	}
1510 	switch (vha->hw->current_topology) {
1511 	case ISP_CFG_NL:
1512 		port_type = FC_PORTTYPE_LPORT;
1513 		break;
1514 	case ISP_CFG_FL:
1515 		port_type = FC_PORTTYPE_NLPORT;
1516 		break;
1517 	case ISP_CFG_N:
1518 		port_type = FC_PORTTYPE_PTP;
1519 		break;
1520 	case ISP_CFG_F:
1521 		port_type = FC_PORTTYPE_NPORT;
1522 		break;
1523 	}
1524 	fc_host_port_type(shost) = port_type;
1525 }
1526 
1527 static void
1528 qla2x00_get_starget_node_name(struct scsi_target *starget)
1529 {
1530 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1531 	scsi_qla_host_t *vha = shost_priv(host);
1532 	fc_port_t *fcport;
1533 	u64 node_name = 0;
1534 
1535 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1536 		if (fcport->rport &&
1537 		    starget->id == fcport->rport->scsi_target_id) {
1538 			node_name = wwn_to_u64(fcport->node_name);
1539 			break;
1540 		}
1541 	}
1542 
1543 	fc_starget_node_name(starget) = node_name;
1544 }
1545 
1546 static void
1547 qla2x00_get_starget_port_name(struct scsi_target *starget)
1548 {
1549 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1550 	scsi_qla_host_t *vha = shost_priv(host);
1551 	fc_port_t *fcport;
1552 	u64 port_name = 0;
1553 
1554 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1555 		if (fcport->rport &&
1556 		    starget->id == fcport->rport->scsi_target_id) {
1557 			port_name = wwn_to_u64(fcport->port_name);
1558 			break;
1559 		}
1560 	}
1561 
1562 	fc_starget_port_name(starget) = port_name;
1563 }
1564 
1565 static void
1566 qla2x00_get_starget_port_id(struct scsi_target *starget)
1567 {
1568 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1569 	scsi_qla_host_t *vha = shost_priv(host);
1570 	fc_port_t *fcport;
1571 	uint32_t port_id = ~0U;
1572 
1573 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1574 		if (fcport->rport &&
1575 		    starget->id == fcport->rport->scsi_target_id) {
1576 			port_id = fcport->d_id.b.domain << 16 |
1577 			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1578 			break;
1579 		}
1580 	}
1581 
1582 	fc_starget_port_id(starget) = port_id;
1583 }
1584 
1585 static void
1586 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1587 {
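	/* Treat a zero timeout as the minimum of one second. */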
1588 	if (timeout)
1589 		rport->dev_loss_tmo = timeout;
1590 	else
1591 		rport->dev_loss_tmo = 1;
1592 }
1593 
1594 static void
1595 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1596 {
1597 	struct Scsi_Host *host = rport_to_shost(rport);
1598 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1599 	unsigned long flags;
1600 
1601 	if (!fcport)
1602 		return;
1603 
1604 	/* Now that the rport has been deleted, set the fcport state to
1605 	   FCS_DEVICE_DEAD */
1606 	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1607 
1608 	/*
1609 	 * Transport has effectively 'deleted' the rport, clear
1610 	 * all local references.
1611 	 */
1612 	spin_lock_irqsave(host->host_lock, flags);
1613 	fcport->rport = fcport->drport = NULL;
1614 	*((fc_port_t **)rport->dd_data) = NULL;
1615 	spin_unlock_irqrestore(host->host_lock, flags);
1616 
1617 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1618 		return;
1619 
1620 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1621 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1622 		return;
1623 	}
1624 }
1625 
1626 static void
1627 qla2x00_terminate_rport_io(struct fc_rport *rport)
1628 {
1629 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1630 
1631 	if (!fcport)
1632 		return;
1633 
1634 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1635 		return;
1636 
1637 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1638 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1639 		return;
1640 	}
1641 	/*
1642 	 * At this point all fcport's software-states are cleared.  Perform any
1643 	 * final cleanup of firmware resources (PCBs and XCBs).
1644 	 */
1645 	if (fcport->loop_id != FC_NO_LOOP_ID &&
1646 	    !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1647 		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1648 			fcport->loop_id, fcport->d_id.b.domain,
1649 			fcport->d_id.b.area, fcport->d_id.b.al_pa);
1650 }
1651 
1652 static int
1653 qla2x00_issue_lip(struct Scsi_Host *shost)
1654 {
1655 	scsi_qla_host_t *vha = shost_priv(shost);
1656 
1657 	qla2x00_loop_reset(vha);
1658 	return 0;
1659 }
1660 
1661 static struct fc_host_statistics *
1662 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1663 {
1664 	scsi_qla_host_t *vha = shost_priv(shost);
1665 	struct qla_hw_data *ha = vha->hw;
1666 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1667 	int rval;
1668 	struct link_statistics *stats;
1669 	dma_addr_t stats_dma;
1670 	struct fc_host_statistics *pfc_host_stat;
1671 
1672 	pfc_host_stat = &ha->fc_host_stat;
1673 	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1674 
1675 	if (test_bit(UNLOADING, &vha->dpc_flags))
1676 		goto done;
1677 
1678 	if (unlikely(pci_channel_offline(ha->pdev)))
1679 		goto done;
1680 
1681 	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1682 	if (stats == NULL) {
1683 		ql_log(ql_log_warn, vha, 0x707d,
1684 		    "Failed to allocate memory for stats.\n");
1685 		goto done;
1686 	}
1687 	memset(stats, 0, DMA_POOL_SIZE);
1688 
1689 	rval = QLA_FUNCTION_FAILED;
1690 	if (IS_FWI2_CAPABLE(ha)) {
1691 		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1692 	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1693 	    !qla2x00_reset_active(vha) && !ha->dpc_active) {
1694 		/* Must be in a 'READY' state for statistics retrieval. */
1695 		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1696 						stats, stats_dma);
1697 	}
1698 
1699 	if (rval != QLA_SUCCESS)
1700 		goto done_free;
1701 
1702 	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1703 	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1704 	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1705 	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1706 	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1707 	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1708 	if (IS_FWI2_CAPABLE(ha)) {
1709 		pfc_host_stat->lip_count = stats->lip_cnt;
1710 		pfc_host_stat->tx_frames = stats->tx_frames;
1711 		pfc_host_stat->rx_frames = stats->rx_frames;
1712 		pfc_host_stat->dumped_frames = stats->dumped_frames;
1713 		pfc_host_stat->nos_count = stats->nos_rcvd;
1714 	}
1715 	pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1716 	pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1717 
1718 done_free:
1719         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1720 done:
1721 	return pfc_host_stat;
1722 }
1723 
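/* Fill in the fc_host symbolic name via qla2x00_get_sym_node_name(). */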
static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
}

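/* A system hostname update is handled by scheduling FDMI re-registration. */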
static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}

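/*
 * Report the fabric name: the fabric node name when a switch was found,
 * otherwise all 0xFF to indicate that no fabric is present.
 */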
static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF};
	u64 fabric_name = wwn_to_u64(node_name);

	if (vha->device_flags & SWITCH_FOUND)
		fabric_name = wwn_to_u64(vha->fabric_node_name);

	fc_host_fabric_name(shost) = fabric_name;
}

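/* Map the base port's loop state onto the FC transport port states. */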
static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

	if (!base_vha->flags.online) {
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		return;
	}

	switch (atomic_read(&base_vha->loop_state)) {
	case LOOP_UPDATE:
		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		break;
	case LOOP_DOWN:
		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
		else
			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_DEAD:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case LOOP_READY:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	default:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
}

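/*
 * Create an NPIV virtual port: validate the request, allocate a vhost,
 * register it with the SCSI midlayer and, where supported, attach a QoS
 * request queue based on the NVRAM NPIV table.
 */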
static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
	int	ret = 0;
	uint8_t	qos = 0;
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	scsi_qla_host_t *vha = NULL;
	struct qla_hw_data *ha = base_vha->hw;
	uint16_t options = 0;
	int	cnt;
	struct req_que *req = ha->req_q_map[0];

	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x707e,
		    "Vport sanity check failed, status %x\n", ret);
		return (ret);
	}

	vha = qla24xx_create_vhost(fc_vport);
	if (vha == NULL) {
		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
		return FC_VPORT_FAILED;
	}
	if (disable) {
		atomic_set(&vha->vp_state, VP_OFFLINE);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else
		atomic_set(&vha->vp_state, VP_FAILED);

	/* ready to create vport */
	ql_log(ql_log_info, vha, 0x7080,
	    "VP entry id %d assigned.\n", vha->vp_idx);

	/* initialize vport states */
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->vp_err_state = VP_ERR_PORTDWN;
	vha->vp_prev_err_state = VP_ERR_UNKWN;
	/* Check if the physical ha port is up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		/* Don't retry or attempt login of this virtual port */
		ql_dbg(ql_dbg_user, vha, 0x7081,
		    "Vport loop state is not UP.\n");
		atomic_set(&vha->loop_state, LOOP_DEAD);
		if (!disable)
			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
	}

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0;
			vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_user, vha, 0x7082,
			    "Registered for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(vha->host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);
			scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
		} else
			vha->flags.difdix_supported = 0;
	}

	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
				   &ha->pdev->dev)) {
		ql_dbg(ql_dbg_user, vha, 0x7083,
		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
		goto vport_create_failed_2;
	}

	/* initialize attributes */
	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) =
		fc_host_supported_classes(base_vha->host);
	fc_host_supported_speeds(vha->host) =
		fc_host_supported_speeds(base_vha->host);

	qla24xx_vport_disable(fc_vport, disable);

	if (ha->flags.cpu_affinity_enabled) {
		req = ha->req_q_map[1];
		ql_dbg(ql_dbg_multiq, vha, 0xc000,
		    "Request queue %p attached with "
		    "VP[%d], cpu affinity =%d\n",
		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
		goto vport_queue;
	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
		goto vport_queue;
	/* Create a request queue in QoS mode for the vport */
	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
					8) == 0) {
			qos = ha->npiv_info[cnt].q_qos;
			break;
		}
	}
	if (qos) {
		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
			qos);
		if (!ret)
			ql_log(ql_log_warn, vha, 0x7084,
			    "Can't create request queue for VP[%d]\n",
			    vha->vp_idx);
		else {
			ql_dbg(ql_dbg_multiq, vha, 0xc001,
			    "Request Que:%d QoS: %d created for VP[%d]\n",
			    ret, qos, vha->vp_idx);
			ql_dbg(ql_dbg_user, vha, 0x7085,
			    "Request Que:%d QoS: %d created for VP[%d]\n",
			    ret, qos, vha->vp_idx);
			req = ha->req_q_map[ret];
		}
	}

vport_queue:
	vha->req = req;
	return 0;

vport_create_failed_2:
	qla24xx_disable_vp(vha);
	qla24xx_deallocate_vp_id(vha);
	scsi_host_put(vha->host);
	return FC_VPORT_FAILED;
}

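/*
 * Tear down an NPIV virtual port: wait for DPC activity to settle, remove the
 * SCSI host, free the fcports and release the vport's request queue and index.
 */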
static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;
	struct qla_hw_data *ha = vha->hw;
	uint16_t id = vha->vp_idx;

	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
		msleep(1000);

	qla24xx_disable_vp(vha);

	vha->flags.delete_progress = 1;

	fc_remove_host(vha->host);

	scsi_remove_host(vha->host);

	/* Allow the timer to run to drain queued items while removing the vp. */
	qla24xx_deallocate_vp_id(vha);

	if (vha->timer_active) {
		qla2x00_vp_stop_timer(vha);
		ql_dbg(ql_dbg_user, vha, 0x7086,
		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
	}

	/* There should be no pending activity on the vha at this point. */
	if (ql2xextended_error_logging & ql_dbg_user)
		msleep(random32() % 10);  /* Just to see if something falls on
					   * the net we have placed below */

	BUG_ON(atomic_read(&vha->vref_count));

	qla2x00_free_fcports(vha);

	mutex_lock(&ha->vport_lock);
	ha->cur_vport_count--;
	clear_bit(vha->vp_idx, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);

	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x7087,
			    "Queue delete failed.\n");
	}

	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
	scsi_host_put(vha->host);
	return 0;
}

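/* Enable or disable a virtual port on behalf of the FC transport. */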
static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	scsi_qla_host_t *vha = fc_vport->dd_data;

	if (disable)
		qla24xx_disable_vp(vha);
	else
		qla24xx_enable_vp(vha);

	return 0;
}

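/* FC transport template registered for the physical (base) port. */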
struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

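/* FC transport template for NPIV virtual ports; no vport management hooks. */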
struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

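/*
 * Initialize the fc_host attributes for a newly added host, including the
 * supported-speeds mask derived from the ISP type.
 */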
void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	if (IS_QLA8XXX_TYPE(ha))
		speed = FC_PORTSPEED_10GBIT;
	else if (IS_QLA25XX(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		    FC_PORTSPEED_1GBIT;
	else if (IS_QLA23XX(ha))
		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else
		speed = FC_PORTSPEED_1GBIT;
	fc_host_supported_speeds(vha->host) = speed;
}