1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c) 2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 
13 /* BSG support for ELS/CT pass through */
14 inline srb_t *
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16 {
17 	srb_t *sp;
18 	struct qla_hw_data *ha = vha->hw;
19 	struct srb_ctx *ctx;
20 
21 	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 	if (!sp)
23 		goto done;
24 	ctx = kzalloc(size, GFP_KERNEL);
25 	if (!ctx) {
26 		mempool_free(sp, ha->srb_mempool);
27 		sp = NULL;
28 		goto done;
29 	}
30 
31 	memset(sp, 0, sizeof(*sp));
32 	sp->fcport = fcport;
33 	sp->ctx = ctx;
34 	ctx->iocbs = 1;
35 done:
36 	return sp;
37 }
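/*
 * Note for callers: on success both the srb and its srb_ctx belong to the
 * caller until qla2x00_start_sp() takes ownership. A minimal sketch of the
 * expected unwind when a later step fails (mirroring the ELS/CT paths below):
 *
 *	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
 *	if (!sp)
 *		return -ENOMEM;
 *	...
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS) {
 *		kfree(sp->ctx);
 *		mempool_free(sp, vha->hw->srb_mempool);
 *		return -EIO;
 *	}
 */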
38 
39 int
40 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
41 	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
42 {
43 	int i, ret, num_valid;
44 	uint8_t *bcode;
45 	struct qla_fcp_prio_entry *pri_entry;
46 	uint32_t *bcode_val_ptr, bcode_val;
47 
48 	ret = 1;
49 	num_valid = 0;
50 	bcode = (uint8_t *)pri_cfg;
51 	bcode_val_ptr = (uint32_t *)pri_cfg;
52 	bcode_val = (uint32_t)(*bcode_val_ptr);
53 
54 	if (bcode_val == 0xFFFFFFFF) {
55 		/* No FCP Priority config data in flash */
56 		ql_dbg(ql_dbg_user, vha, 0x7051,
57 		    "No FCP Priority config data.\n");
58 		return 0;
59 	}
60 
61 	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
62 			bcode[3] != 'S') {
63 		/* Invalid FCP priority data header */
64 		ql_dbg(ql_dbg_user, vha, 0x7052,
65 		    "Invalid FCP Priority data header. bcode=0x%x.\n",
66 		    bcode_val);
67 		return 0;
68 	}
69 	if (flag != 1)
70 		return ret;
71 
72 	pri_entry = &pri_cfg->entry[0];
73 	for (i = 0; i < pri_cfg->num_entries; i++) {
74 		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
75 			num_valid++;
76 		pri_entry++;
77 	}
78 
79 	if (num_valid == 0) {
80 		/* No valid FCP priority data entries */
81 		ql_dbg(ql_dbg_user, vha, 0x7053,
82 		    "No valid FCP Priority data entries.\n");
83 		ret = 0;
84 	} else {
85 		/* FCP priority data is valid */
86 		ql_dbg(ql_dbg_user, vha, 0x7054,
87 		    "Valid FCP priority data. num entries = %d.\n",
88 		    num_valid);
89 	}
90 
91 	return ret;
92 }
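/*
 * The first four bytes of a valid FCP priority region in flash hold the
 * ASCII signature 'H' 'Q' 'O' 'S'. Read back as a host-order 32-bit word
 * (as bcode_val is above), an intact header is 0x534F5148 on a
 * little-endian machine, while erased flash reads as 0xFFFFFFFF.
 */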
93 
94 static int
95 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
96 {
97 	struct Scsi_Host *host = bsg_job->shost;
98 	scsi_qla_host_t *vha = shost_priv(host);
99 	struct qla_hw_data *ha = vha->hw;
100 	int ret = 0;
101 	uint32_t len;
102 	uint32_t oper;
103 
104 	bsg_job->reply->reply_payload_rcv_len = 0;
105 
106 	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
107 		ret = -EINVAL;
108 		goto exit_fcp_prio_cfg;
109 	}
110 
111 	/* Get the sub command */
112 	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
113 
114 	/* Only set config is allowed if config memory is not allocated */
115 	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
116 		ret = -EINVAL;
117 		goto exit_fcp_prio_cfg;
118 	}
119 	switch (oper) {
120 	case QLFC_FCP_PRIO_DISABLE:
121 		if (ha->flags.fcp_prio_enabled) {
122 			ha->flags.fcp_prio_enabled = 0;
123 			ha->fcp_prio_cfg->attributes &=
124 				~FCP_PRIO_ATTR_ENABLE;
125 			qla24xx_update_all_fcp_prio(vha);
126 			bsg_job->reply->result = DID_OK;
127 		} else {
128 			ret = -EINVAL;
129 			bsg_job->reply->result = (DID_ERROR << 16);
130 			goto exit_fcp_prio_cfg;
131 		}
132 		break;
133 
134 	case QLFC_FCP_PRIO_ENABLE:
135 		if (!ha->flags.fcp_prio_enabled) {
136 			if (ha->fcp_prio_cfg) {
137 				ha->flags.fcp_prio_enabled = 1;
138 				ha->fcp_prio_cfg->attributes |=
139 				    FCP_PRIO_ATTR_ENABLE;
140 				qla24xx_update_all_fcp_prio(vha);
141 				bsg_job->reply->result = DID_OK;
142 			} else {
143 				ret = -EINVAL;
144 				bsg_job->reply->result = (DID_ERROR << 16);
145 				goto exit_fcp_prio_cfg;
146 			}
147 		}
148 		break;
149 
150 	case QLFC_FCP_PRIO_GET_CONFIG:
151 		len = bsg_job->reply_payload.payload_len;
152 		if (!len || len > FCP_PRIO_CFG_SIZE) {
153 			ret = -EINVAL;
154 			bsg_job->reply->result = (DID_ERROR << 16);
155 			goto exit_fcp_prio_cfg;
156 		}
157 
158 		bsg_job->reply->result = DID_OK;
159 		bsg_job->reply->reply_payload_rcv_len =
160 			sg_copy_from_buffer(
161 			bsg_job->reply_payload.sg_list,
162 			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
163 			len);
164 
165 		break;
166 
167 	case QLFC_FCP_PRIO_SET_CONFIG:
168 		len = bsg_job->request_payload.payload_len;
169 		if (!len || len > FCP_PRIO_CFG_SIZE) {
170 			bsg_job->reply->result = (DID_ERROR << 16);
171 			ret = -EINVAL;
172 			goto exit_fcp_prio_cfg;
173 		}
174 
175 		if (!ha->fcp_prio_cfg) {
176 			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
177 			if (!ha->fcp_prio_cfg) {
178 				ql_log(ql_log_warn, vha, 0x7050,
179 				    "Unable to allocate memory for fcp prio "
180 				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
181 				bsg_job->reply->result = (DID_ERROR << 16);
182 				ret = -ENOMEM;
183 				goto exit_fcp_prio_cfg;
184 			}
185 		}
186 
187 		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
188 		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
189 		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
190 			FCP_PRIO_CFG_SIZE);
191 
192 		/* validate fcp priority data */
193 
194 		if (!qla24xx_fcp_prio_cfg_valid(vha,
195 		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
196 			bsg_job->reply->result = (DID_ERROR << 16);
197 			ret = -EINVAL;
198 			/* If the buffer was invalid, then
199 			 * fcp_prio_cfg is of no use
200 			 */
201 			vfree(ha->fcp_prio_cfg);
202 			ha->fcp_prio_cfg = NULL;
203 			goto exit_fcp_prio_cfg;
204 		}
205 
206 		ha->flags.fcp_prio_enabled = 0;
207 		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
208 			ha->flags.fcp_prio_enabled = 1;
209 		qla24xx_update_all_fcp_prio(vha);
210 		bsg_job->reply->result = DID_OK;
211 		break;
212 	default:
213 		ret = -EINVAL;
214 		break;
215 	}
216 exit_fcp_prio_cfg:
217 	bsg_job->job_done(bsg_job);
218 	return ret;
219 }
220 static int
221 qla2x00_process_els(struct fc_bsg_job *bsg_job)
222 {
223 	struct fc_rport *rport;
224 	fc_port_t *fcport = NULL;
225 	struct Scsi_Host *host;
226 	scsi_qla_host_t *vha;
227 	struct qla_hw_data *ha;
228 	srb_t *sp;
229 	const char *type;
230 	int req_sg_cnt, rsp_sg_cnt;
231 	int rval = (DRIVER_ERROR << 16);
232 	uint16_t nextlid = 0;
233 	struct srb_ctx *els;
234 
235 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
236 		rport = bsg_job->rport;
237 		fcport = *(fc_port_t **) rport->dd_data;
238 		host = rport_to_shost(rport);
239 		vha = shost_priv(host);
240 		ha = vha->hw;
241 		type = "FC_BSG_RPT_ELS";
242 	} else {
243 		host = bsg_job->shost;
244 		vha = shost_priv(host);
245 		ha = vha->hw;
246 		type = "FC_BSG_HST_ELS_NOLOGIN";
247 	}
248 
249 	/* pass through is supported only for ISP 4Gb or higher */
250 	if (!IS_FWI2_CAPABLE(ha)) {
251 		ql_dbg(ql_dbg_user, vha, 0x7001,
252 		    "ELS passthru not supported for ISP23xx based adapters.\n");
253 		rval = -EPERM;
254 		goto done;
255 	}
256 
257 	/*  Multiple SG's are not supported for ELS requests */
258 	if (bsg_job->request_payload.sg_cnt > 1 ||
259 		bsg_job->reply_payload.sg_cnt > 1) {
260 		ql_dbg(ql_dbg_user, vha, 0x7002,
261 		    "Multiple SG's are not supported for ELS requests, "
262 		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
263 		    bsg_job->request_payload.sg_cnt,
264 		    bsg_job->reply_payload.sg_cnt);
265 		rval = -EPERM;
266 		goto done;
267 	}
268 
269 	/* ELS request for rport */
270 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
271 		/* make sure the rport is logged in,
272 		 * if not perform fabric login
273 		 */
274 		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
275 			ql_dbg(ql_dbg_user, vha, 0x7003,
276 			    "Failed to login port %06X for ELS passthru.\n",
277 			    fcport->d_id.b24);
278 			rval = -EIO;
279 			goto done;
280 		}
281 	} else {
282 		/* Allocate a dummy fcport structure, since functions
283 		 * preparing the IOCB and mailbox command retrieves port
284 		 * specific information from fcport structure. For Host based
285 		 * ELS commands there will be no fcport structure allocated
286 		 */
287 		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
288 		if (!fcport) {
289 			rval = -ENOMEM;
290 			goto done;
291 		}
292 
293 		/* Initialize all required fields of fcport */
294 		fcport->vha = vha;
295 		fcport->vp_idx = vha->vp_idx;
296 		fcport->d_id.b.al_pa =
297 			bsg_job->request->rqst_data.h_els.port_id[0];
298 		fcport->d_id.b.area =
299 			bsg_job->request->rqst_data.h_els.port_id[1];
300 		fcport->d_id.b.domain =
301 			bsg_job->request->rqst_data.h_els.port_id[2];
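		/* d_id FFFFFD is the fabric controller's well-known address;
		 * any other host-initiated ELS is treated as addressed to
		 * the F_PORT (FFFFFE), hence the loop_id choice below. */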
302 		fcport->loop_id =
303 			(fcport->d_id.b.al_pa == 0xFD) ?
304 			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
305 	}
306 
307 	if (!vha->flags.online) {
308 		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
309 		rval = -EIO;
310 		goto done;
311 	}
312 
313 	req_sg_cnt =
314 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
315 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
316 	if (!req_sg_cnt) {
317 		rval = -ENOMEM;
318 		goto done_free_fcport;
319 	}
320 
321 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
322 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
323 	if (!rsp_sg_cnt) {
324 		rval = -ENOMEM;
325 		goto done_free_fcport;
326 	}
327 
328 	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
329 		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
330 		ql_log(ql_log_warn, vha, 0x7008,
331 		    "dma mapping resulted in different sg counts, "
332 		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
333 		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
334 		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
335 		rval = -EAGAIN;
336 		goto done_unmap_sg;
337 	}
338 
339 	/* Alloc SRB structure */
340 	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
341 	if (!sp) {
342 		rval = -ENOMEM;
343 		goto done_unmap_sg;
344 	}
345 
346 	els = sp->ctx;
347 	els->type =
348 		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
349 		SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
350 	els->name =
351 		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
352 		"bsg_els_rpt" : "bsg_els_hst");
353 	els->u.bsg_job = bsg_job;
354 
355 	ql_dbg(ql_dbg_user, vha, 0x700a,
356 	    "bsg rqst type: %s els type: %x - loop-id=%x "
357 	    "portid=%02x%02x%02x.\n", type,
358 	    bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
359 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
360 
361 	rval = qla2x00_start_sp(sp);
362 	if (rval != QLA_SUCCESS) {
363 		ql_log(ql_log_warn, vha, 0x700e,
364 		    "qla2x00_start_sp failed = %d\n", rval);
365 		kfree(sp->ctx);
366 		mempool_free(sp, ha->srb_mempool);
367 		rval = -EIO;
368 		goto done_unmap_sg;
369 	}
370 	return rval;
371 
372 done_unmap_sg:
373 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
374 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
375 	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
376 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 	goto done_free_fcport;
378 
379 done_free_fcport:
380 	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
381 		kfree(fcport);
382 done:
383 	return rval;
384 }
385 
386 inline uint16_t
387 qla24xx_calc_ct_iocbs(uint16_t dsds)
388 {
389 	uint16_t iocbs;
390 
391 	iocbs = 1;
392 	if (dsds > 2) {
393 		iocbs += (dsds - 2) / 5;
394 		if ((dsds - 2) % 5)
395 			iocbs++;
396 	}
397 	return iocbs;
398 }
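/*
 * Worked example for qla24xx_calc_ct_iocbs(): the command IOCB carries up
 * to two data segment descriptors and each continuation IOCB carries up to
 * five more, i.e. iocbs = 1 + ceil((dsds - 2) / 5) when dsds > 2:
 *
 *	dsds = 2  -> 1 IOCB
 *	dsds = 3  -> 1 + ceil(1/5)  = 2 IOCBs
 *	dsds = 12 -> 1 + ceil(10/5) = 3 IOCBs
 */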
399 
400 static int
401 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
402 {
403 	srb_t *sp;
404 	struct Scsi_Host *host = bsg_job->shost;
405 	scsi_qla_host_t *vha = shost_priv(host);
406 	struct qla_hw_data *ha = vha->hw;
407 	int rval = (DRIVER_ERROR << 16);
408 	int req_sg_cnt, rsp_sg_cnt;
409 	uint16_t loop_id;
410 	struct fc_port *fcport;
411 	char  *type = "FC_BSG_HST_CT";
412 	struct srb_ctx *ct;
413 
414 	req_sg_cnt =
415 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
416 			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
417 	if (!req_sg_cnt) {
418 		ql_log(ql_log_warn, vha, 0x700f,
419 		    "dma_map_sg returned %d for request.\n", req_sg_cnt);
420 		rval = -ENOMEM;
421 		goto done;
422 	}
423 
424 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
425 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
426 	if (!rsp_sg_cnt) {
427 		ql_log(ql_log_warn, vha, 0x7010,
428 		    "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
429 		rval = -ENOMEM;
430 		goto done;
431 	}
432 
433 	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
434 	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
435 		ql_log(ql_log_warn, vha, 0x7011,
436 		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
437 		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
438 		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
439 		rval = -EAGAIN;
440 		goto done_unmap_sg;
441 	}
442 
443 	if (!vha->flags.online) {
444 		ql_log(ql_log_warn, vha, 0x7012,
445 		    "Host is not online.\n");
446 		rval = -EIO;
447 		goto done_unmap_sg;
448 	}
449 
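	/* The top byte of CT preamble word 1 selects the well-known fabric
	 * server being addressed: 0xFC is the directory/name server (SNS),
	 * 0xFA the management server. */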
450 	loop_id =
451 		(bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
452 			>> 24;
453 	switch (loop_id) {
454 	case 0xFC:
455 		loop_id = NPH_SNS;
456 		break;
457 	case 0xFA:
458 		loop_id = vha->mgmt_svr_loop_id;
459 		break;
460 	default:
461 		ql_dbg(ql_dbg_user, vha, 0x7013,
462 		    "Unknown loop id: %x.\n", loop_id);
463 		rval = -EINVAL;
464 		goto done_unmap_sg;
465 	}
466 
467 	/* Allocate a dummy fcport structure, since functions preparing the
468 	 * IOCB and mailbox command retrieves port specific information
469 	 * from fcport structure. For host based CT commands there will be
470 	 * no fcport structure allocated
471 	 */
472 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
473 	if (!fcport) {
474 		ql_log(ql_log_warn, vha, 0x7014,
475 		    "Failed to allocate fcport.\n");
476 		rval = -ENOMEM;
477 		goto done_unmap_sg;
478 	}
479 
480 	/* Initialize all required fields of fcport */
481 	fcport->vha = vha;
482 	fcport->vp_idx = vha->vp_idx;
483 	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
484 	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
485 	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
486 	fcport->loop_id = loop_id;
487 
488 	/* Alloc SRB structure */
489 	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
490 	if (!sp) {
491 		ql_log(ql_log_warn, vha, 0x7015,
492 		    "qla2x00_get_ctx_bsg_sp failed.\n");
493 		rval = -ENOMEM;
494 		goto done_free_fcport;
495 	}
496 
497 	ct = sp->ctx;
498 	ct->type = SRB_CT_CMD;
499 	ct->name = "bsg_ct";
500 	ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
501 	ct->u.bsg_job = bsg_job;
502 
503 	ql_dbg(ql_dbg_user, vha, 0x7016,
504 	    "bsg rqst type: %s ct type: %x - "
505 	    "loop-id=%x portid=%02x%02x%02x.\n", type,
506 	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
507 	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
508 	    fcport->d_id.b.al_pa);
509 
510 	rval = qla2x00_start_sp(sp);
511 	if (rval != QLA_SUCCESS) {
512 		ql_log(ql_log_warn, vha, 0x7017,
513 		    "qla2x00_start_sp failed=%d.\n", rval);
514 		kfree(sp->ctx);
515 		mempool_free(sp, ha->srb_mempool);
516 		rval = -EIO;
517 		goto done_free_fcport;
518 	}
519 	return rval;
520 
521 done_free_fcport:
522 	kfree(fcport);
523 done_unmap_sg:
524 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
525 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
526 	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
527 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
528 done:
529 	return rval;
530 }
531 
532 /* Set the port configuration to enable the
533  * internal loopback on ISP81XX
534  */
535 static inline int
536 qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
537     uint16_t *new_config)
538 {
539 	int ret = 0;
540 	int rval = 0;
541 	struct qla_hw_data *ha = vha->hw;
542 
543 	if (!IS_QLA81XX(ha))
544 		goto done_set_internal;
545 
546 	new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
547 	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
548 
549 	ha->notify_dcbx_comp = 1;
550 	ret = qla81xx_set_port_config(vha, new_config);
551 	if (ret != QLA_SUCCESS) {
552 		ql_log(ql_log_warn, vha, 0x7021,
553 		    "set port config failed.\n");
554 		ha->notify_dcbx_comp = 0;
555 		rval = -EINVAL;
556 		goto done_set_internal;
557 	}
558 
559 	/* Wait for DCBX complete event */
560 	if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
561 		ql_dbg(ql_dbg_user, vha, 0x7022,
562 		    "State change notification not received.\n");
563 	} else
564 		ql_dbg(ql_dbg_user, vha, 0x7023,
565 		    "State change received.\n");
566 
567 	ha->notify_dcbx_comp = 0;
568 
569 done_set_internal:
570 	return rval;
571 }
572 
573 /* Set the port configuration to disable the
574  * internal loopback on ISP81XX
575  */
576 static inline int
577 qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
578     int wait)
579 {
580 	int ret = 0;
581 	int rval = 0;
582 	uint16_t new_config[4];
583 	struct qla_hw_data *ha = vha->hw;
584 
585 	if (!IS_QLA81XX(ha))
586 		goto done_reset_internal;
587 
588 	memset(new_config, 0, sizeof(new_config));
589 	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
590 			ENABLE_INTERNAL_LOOPBACK) {
591 		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
592 		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
593 
594 		ha->notify_dcbx_comp = wait;
595 		ret = qla81xx_set_port_config(vha, new_config);
596 		if (ret != QLA_SUCCESS) {
597 			ql_log(ql_log_warn, vha, 0x7025,
598 			    "Set port config failed.\n");
599 			ha->notify_dcbx_comp = 0;
600 			rval = -EINVAL;
601 			goto done_reset_internal;
602 		}
603 
604 		/* Wait for DCBX complete event */
605 		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
606 			(20 * HZ))) {
607 			ql_dbg(ql_dbg_user, vha, 0x7026,
608 			    "State change notification not received.\n");
609 			ha->notify_dcbx_comp = 0;
610 			rval = -EINVAL;
611 			goto done_reset_internal;
612 		} else
613 			ql_dbg(ql_dbg_user, vha, 0x7027,
614 			    "State change received.\n");
615 
616 		ha->notify_dcbx_comp = 0;
617 	}
618 done_reset_internal:
619 	return rval;
620 }
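/*
 * The loopback mode lives in port-config word 0, in the bits covered by
 * INTERNAL_LOOPBACK_MASK (the mode value shifted left by one); words 1-3
 * pass through unchanged. Roughly, the two helpers above do:
 *
 *	enable:  new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
 *	disable: new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
 *	query:   ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1) ==
 *	             ENABLE_INTERNAL_LOOPBACK
 */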
621 
622 static int
623 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
624 {
625 	struct Scsi_Host *host = bsg_job->shost;
626 	scsi_qla_host_t *vha = shost_priv(host);
627 	struct qla_hw_data *ha = vha->hw;
628 	int rval;
629 	uint8_t command_sent;
630 	char *type;
631 	struct msg_echo_lb elreq;
632 	uint16_t response[MAILBOX_REGISTER_COUNT];
633 	uint16_t config[4], new_config[4];
634 	uint8_t *fw_sts_ptr;
635 	uint8_t *req_data = NULL;
636 	dma_addr_t req_data_dma;
637 	uint32_t req_data_len;
638 	uint8_t *rsp_data = NULL;
639 	dma_addr_t rsp_data_dma;
640 	uint32_t rsp_data_len;
641 
642 	if (!vha->flags.online) {
643 		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
644 		return -EIO;
645 	}
646 
647 	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
649 		DMA_TO_DEVICE);
650 
651 	if (!elreq.req_sg_cnt) {
652 		ql_log(ql_log_warn, vha, 0x701a,
653 		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
654 		return -ENOMEM;
655 	}
656 
657 	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
658 		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
659 		DMA_FROM_DEVICE);
660 
661 	if (!elreq.rsp_sg_cnt) {
662 		ql_log(ql_log_warn, vha, 0x701b,
663 		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
664 		rval = -ENOMEM;
665 		goto done_unmap_req_sg;
666 	}
667 
668 	if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
669 		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
670 		ql_log(ql_log_warn, vha, 0x701c,
671 		    "dma mapping resulted in different sg counts, "
672 		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
673 		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
674 		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
675 		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
676 		rval = -EAGAIN;
677 		goto done_unmap_sg;
678 	}
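	/* Loopback and echo return exactly what was sent, so the request
	 * payload length sizes both DMA bounce buffers. */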
679 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
680 	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
681 		&req_data_dma, GFP_KERNEL);
682 	if (!req_data) {
683 		ql_log(ql_log_warn, vha, 0x701d,
684 		    "dma alloc failed for req_data.\n");
685 		rval = -ENOMEM;
686 		goto done_unmap_sg;
687 	}
688 
689 	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
690 		&rsp_data_dma, GFP_KERNEL);
691 	if (!rsp_data) {
692 		ql_log(ql_log_warn, vha, 0x7004,
693 		    "dma alloc failed for rsp_data.\n");
694 		rval = -ENOMEM;
695 		goto done_free_dma_req;
696 	}
697 
698 	/* Copy the request buffer in req_data now */
699 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
700 		bsg_job->request_payload.sg_cnt, req_data, req_data_len);
701 
702 	elreq.send_dma = req_data_dma;
703 	elreq.rcv_dma = rsp_data_dma;
704 	elreq.transfer_size = req_data_len;
705 
706 	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
707 
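	/* An ECHO diagnostic is substituted for the loopback test when
	 * external loopback is requested but the link cannot honour it:
	 * fabric (F_PORT) topology, loop down, or an ISP81xx carrying a
	 * full-sized ELS frame. */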
708 	if ((ha->current_topology == ISP_CFG_F ||
709 	    (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
710 	    (IS_QLA81XX(ha) &&
711 	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
712 	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
713 		elreq.options == EXTERNAL_LOOPBACK) {
714 		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
715 		ql_dbg(ql_dbg_user, vha, 0x701e,
716 		    "BSG request type: %s.\n", type);
717 		command_sent = INT_DEF_LB_ECHO_CMD;
718 		rval = qla2x00_echo_test(vha, &elreq, response);
719 	} else {
720 		if (IS_QLA81XX(ha)) {
721 			memset(config, 0, sizeof(config));
722 			memset(new_config, 0, sizeof(new_config));
723 			if (qla81xx_get_port_config(vha, config)) {
724 				ql_log(ql_log_warn, vha, 0x701f,
725 				    "Get port config failed.\n");
726 				bsg_job->reply->reply_payload_rcv_len = 0;
727 				bsg_job->reply->result = (DID_ERROR << 16);
728 				rval = -EPERM;
729 				goto done_free_dma_req;
730 			}
731 
732 			if (elreq.options != EXTERNAL_LOOPBACK) {
733 				ql_dbg(ql_dbg_user, vha, 0x7020,
734 				    "Internal: current port config = %x\n",
735 				    config[0]);
736 				if (qla81xx_set_internal_loopback(vha, config,
737 					new_config)) {
738 					ql_log(ql_log_warn, vha, 0x7024,
739 					    "Internal loopback failed.\n");
740 					bsg_job->reply->reply_payload_rcv_len =
741 						0;
742 					bsg_job->reply->result =
743 						(DID_ERROR << 16);
744 					rval = -EPERM;
745 					goto done_free_dma_req;
746 				}
747 			} else {
748 				/* For external loopback to work
749 				 * ensure internal loopback is disabled
750 				 */
751 				if (qla81xx_reset_internal_loopback(vha,
752 					config, 1)) {
753 					bsg_job->reply->reply_payload_rcv_len =
754 						0;
755 					bsg_job->reply->result =
756 						(DID_ERROR << 16);
757 					rval = -EPERM;
758 					goto done_free_dma_req;
759 				}
760 			}
761 
762 			type = "FC_BSG_HST_VENDOR_LOOPBACK";
763 			ql_dbg(ql_dbg_user, vha, 0x7028,
764 			    "BSG request type: %s.\n", type);
765 
766 			command_sent = INT_DEF_LB_LOOPBACK_CMD;
767 			rval = qla2x00_loopback_test(vha, &elreq, response);
768 
769 			if (new_config[0]) {
770 				/* Revert back to original port config
771 				 * Also clear internal loopback
772 				 */
773 				qla81xx_reset_internal_loopback(vha,
774 				    new_config, 0);
775 			}
776 
777 			if (response[0] == MBS_COMMAND_ERROR &&
778 					response[1] == MBS_LB_RESET) {
779 				ql_log(ql_log_warn, vha, 0x7029,
780 				    "MBX command error, Aborting ISP.\n");
781 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
782 				qla2xxx_wake_dpc(vha);
783 				qla2x00_wait_for_chip_reset(vha);
784 				/* Also reset the MPI */
785 				if (qla81xx_restart_mpi_firmware(vha) !=
786 				    QLA_SUCCESS) {
787 					ql_log(ql_log_warn, vha, 0x702a,
788 					    "MPI reset failed.\n");
789 				}
790 
791 				bsg_job->reply->reply_payload_rcv_len = 0;
792 				bsg_job->reply->result = (DID_ERROR << 16);
793 				rval = -EIO;
794 				goto done_free_dma_req;
795 			}
796 		} else {
797 			type = "FC_BSG_HST_VENDOR_LOOPBACK";
798 			ql_dbg(ql_dbg_user, vha, 0x702b,
799 			    "BSG request type: %s.\n", type);
800 			command_sent = INT_DEF_LB_LOOPBACK_CMD;
801 			rval = qla2x00_loopback_test(vha, &elreq, response);
802 		}
803 	}
804 
805 	if (rval) {
806 		ql_log(ql_log_warn, vha, 0x702c,
807 		    "Vendor request %s failed.\n", type);
808 
809 		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
810 		    sizeof(struct fc_bsg_reply);
811 
812 		memcpy(fw_sts_ptr, response, sizeof(response));
813 		fw_sts_ptr += sizeof(response);
814 		*fw_sts_ptr = command_sent;
815 		rval = 0;
816 		bsg_job->reply->reply_payload_rcv_len = 0;
817 		bsg_job->reply->result = (DID_ERROR << 16);
818 	} else {
819 		ql_dbg(ql_dbg_user, vha, 0x702d,
820 		    "Vendor request %s completed.\n", type);
821 
822 		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
823 			sizeof(response) + sizeof(uint8_t);
824 		bsg_job->reply->reply_payload_rcv_len =
825 			bsg_job->reply_payload.payload_len;
826 		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
827 			sizeof(struct fc_bsg_reply);
828 		memcpy(fw_sts_ptr, response, sizeof(response));
829 		fw_sts_ptr += sizeof(response);
830 		*fw_sts_ptr = command_sent;
831 		bsg_job->reply->result = DID_OK;
832 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
833 			bsg_job->reply_payload.sg_cnt, rsp_data,
834 			rsp_data_len);
835 	}
836 	bsg_job->job_done(bsg_job);
837 
838 	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
839 		rsp_data, rsp_data_dma);
840 done_free_dma_req:
841 	dma_free_coherent(&ha->pdev->dev, req_data_len,
842 		req_data, req_data_dma);
843 done_unmap_sg:
844 	dma_unmap_sg(&ha->pdev->dev,
845 	    bsg_job->reply_payload.sg_list,
846 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
847 done_unmap_req_sg:
848 	dma_unmap_sg(&ha->pdev->dev,
849 	    bsg_job->request_payload.sg_list,
850 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
851 	return rval;
852 }
853 
854 static int
855 qla84xx_reset(struct fc_bsg_job *bsg_job)
856 {
857 	struct Scsi_Host *host = bsg_job->shost;
858 	scsi_qla_host_t *vha = shost_priv(host);
859 	struct qla_hw_data *ha = vha->hw;
860 	int rval = 0;
861 	uint32_t flag;
862 
863 	if (!IS_QLA84XX(ha)) {
864 		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
865 		return -EINVAL;
866 	}
867 
868 	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
869 
870 	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
871 
872 	if (rval) {
873 		ql_log(ql_log_warn, vha, 0x7030,
874 		    "Vendor request 84xx reset failed.\n");
875 		rval = bsg_job->reply->reply_payload_rcv_len = 0;
876 		bsg_job->reply->result = (DID_ERROR << 16);
877 
878 	} else {
879 		ql_dbg(ql_dbg_user, vha, 0x7031,
880 		    "Vendor request 84xx reset completed.\n");
881 		bsg_job->reply->result = DID_OK;
882 	}
883 
884 	bsg_job->job_done(bsg_job);
885 	return rval;
886 }
887 
888 static int
889 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
890 {
891 	struct Scsi_Host *host = bsg_job->shost;
892 	scsi_qla_host_t *vha = shost_priv(host);
893 	struct qla_hw_data *ha = vha->hw;
894 	struct verify_chip_entry_84xx *mn = NULL;
895 	dma_addr_t mn_dma, fw_dma;
896 	void *fw_buf = NULL;
897 	int rval = 0;
898 	uint32_t sg_cnt;
899 	uint32_t data_len;
900 	uint16_t options;
901 	uint32_t flag;
902 	uint32_t fw_ver;
903 
904 	if (!IS_QLA84XX(ha)) {
905 		ql_dbg(ql_dbg_user, vha, 0x7032,
906 		    "Not 84xx, exiting.\n");
907 		return -EINVAL;
908 	}
909 
910 	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
911 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
912 	if (!sg_cnt) {
913 		ql_log(ql_log_warn, vha, 0x7033,
914 		    "dma_map_sg returned %d for request.\n", sg_cnt);
915 		return -ENOMEM;
916 	}
917 
918 	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
919 		ql_log(ql_log_warn, vha, 0x7034,
920 		    "DMA mapping resulted in different sg counts, "
921 		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
922 		    bsg_job->request_payload.sg_cnt, sg_cnt);
923 		rval = -EAGAIN;
924 		goto done_unmap_sg;
925 	}
926 
927 	data_len = bsg_job->request_payload.payload_len;
928 	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
929 		&fw_dma, GFP_KERNEL);
930 	if (!fw_buf) {
931 		ql_log(ql_log_warn, vha, 0x7035,
932 		    "DMA alloc failed for fw_buf.\n");
933 		rval = -ENOMEM;
934 		goto done_unmap_sg;
935 	}
936 
937 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
938 		bsg_job->request_payload.sg_cnt, fw_buf, data_len);
939 
940 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
941 	if (!mn) {
942 		ql_log(ql_log_warn, vha, 0x7036,
943 		    "DMA alloc failed for fw buffer.\n");
944 		rval = -ENOMEM;
945 		goto done_free_fw_buf;
946 	}
947 
948 	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
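	/* The firmware version is the little-endian 32-bit word at byte
	 * offset 8 of the image, i.e. ((uint32_t *)fw_buf)[2]. */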
949 	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
950 
951 	memset(mn, 0, sizeof(*mn));
952 	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
953 	mn->entry_count = 1;
954 
955 	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
956 	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
957 		options |= VCO_DIAG_FW;
958 
959 	mn->options = cpu_to_le16(options);
960 	mn->fw_ver = cpu_to_le32(fw_ver);
961 	mn->fw_size = cpu_to_le32(data_len);
962 	mn->fw_seq_size = cpu_to_le32(data_len);
963 	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
964 	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
965 	mn->dseg_length = cpu_to_le32(data_len);
966 	mn->data_seg_cnt = cpu_to_le16(1);
967 
968 	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
969 
970 	if (rval) {
971 		ql_log(ql_log_warn, vha, 0x7037,
972 		    "Vendor request 84xx updatefw failed.\n");
973 
974 		rval = bsg_job->reply->reply_payload_rcv_len = 0;
975 		bsg_job->reply->result = (DID_ERROR << 16);
976 
977 	} else {
978 		ql_dbg(ql_dbg_user, vha, 0x7038,
979 		    "Vendor request 84xx updatefw completed.\n");
980 
981 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
982 		bsg_job->reply->result = DID_OK;
983 	}
984 
985 	bsg_job->job_done(bsg_job);
986 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
987 
988 done_free_fw_buf:
989 	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
990 
991 done_unmap_sg:
992 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
993 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
994 
995 	return rval;
996 }
997 
998 static int
999 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1000 {
1001 	struct Scsi_Host *host = bsg_job->shost;
1002 	scsi_qla_host_t *vha = shost_priv(host);
1003 	struct qla_hw_data *ha = vha->hw;
1004 	struct access_chip_84xx *mn = NULL;
1005 	dma_addr_t mn_dma, mgmt_dma;
1006 	void *mgmt_b = NULL;
1007 	int rval = 0;
1008 	struct qla_bsg_a84_mgmt *ql84_mgmt;
1009 	uint32_t sg_cnt;
1010 	uint32_t data_len = 0;
1011 	uint32_t dma_direction = DMA_NONE;
1012 
1013 	if (!IS_QLA84XX(ha)) {
1014 		ql_log(ql_log_warn, vha, 0x703a,
1015 		    "Not 84xx, exiting.\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1020 		sizeof(struct fc_bsg_request));
1021 	if (!ql84_mgmt) {
1022 		ql_log(ql_log_warn, vha, 0x703b,
1023 		    "MGMT header not provided, exiting.\n");
1024 		return -EINVAL;
1025 	}
1026 
1027 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1028 	if (!mn) {
1029 		ql_log(ql_log_warn, vha, 0x703c,
1030 		    "DMA alloc failed for fw buffer.\n");
1031 		return -ENOMEM;
1032 	}
1033 
1034 	memset(mn, 0, sizeof(struct access_chip_84xx));
1035 	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1036 	mn->entry_count = 1;
1037 
1038 	switch (ql84_mgmt->mgmt.cmd) {
1039 	case QLA84_MGMT_READ_MEM:
1040 	case QLA84_MGMT_GET_INFO:
1041 		sg_cnt = dma_map_sg(&ha->pdev->dev,
1042 			bsg_job->reply_payload.sg_list,
1043 			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1044 		if (!sg_cnt) {
1045 			ql_log(ql_log_warn, vha, 0x703d,
1046 			    "dma_map_sg returned %d for reply.\n", sg_cnt);
1047 			rval = -ENOMEM;
1048 			goto exit_mgmt;
1049 		}
1050 
1051 		dma_direction = DMA_FROM_DEVICE;
1052 
1053 		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1054 			ql_log(ql_log_warn, vha, 0x703e,
1055 			    "DMA mapping resulted in different sg counts, "
1056 			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1057 			    bsg_job->reply_payload.sg_cnt, sg_cnt);
1058 			rval = -EAGAIN;
1059 			goto done_unmap_sg;
1060 		}
1061 
1062 		data_len = bsg_job->reply_payload.payload_len;
1063 
1064 		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1065 		    &mgmt_dma, GFP_KERNEL);
1066 		if (!mgmt_b) {
1067 			ql_log(ql_log_warn, vha, 0x703f,
1068 			    "DMA alloc failed for mgmt_b.\n");
1069 			rval = -ENOMEM;
1070 			goto done_unmap_sg;
1071 		}
1072 
1073 		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1074 			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1075 			mn->parameter1 =
1076 				cpu_to_le32(
1077 				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1078 
1079 		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1080 			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1081 			mn->parameter1 =
1082 				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1083 
1084 			mn->parameter2 =
1085 				cpu_to_le32(
1086 				ql84_mgmt->mgmt.mgmtp.u.info.context);
1087 		}
1088 		break;
1089 
1090 	case QLA84_MGMT_WRITE_MEM:
1091 		sg_cnt = dma_map_sg(&ha->pdev->dev,
1092 			bsg_job->request_payload.sg_list,
1093 			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1094 
1095 		if (!sg_cnt) {
1096 			ql_log(ql_log_warn, vha, 0x7040,
1097 			    "dma_map_sg returned %d.\n", sg_cnt);
1098 			rval = -ENOMEM;
1099 			goto exit_mgmt;
1100 		}
1101 
1102 		dma_direction = DMA_TO_DEVICE;
1103 
1104 		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1105 			ql_log(ql_log_warn, vha, 0x7041,
1106 			    "DMA mapping resulted in different sg counts, "
1107 			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1108 			    bsg_job->request_payload.sg_cnt, sg_cnt);
1109 			rval = -EAGAIN;
1110 			goto done_unmap_sg;
1111 		}
1112 
1113 		data_len = bsg_job->request_payload.payload_len;
1114 		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1115 			&mgmt_dma, GFP_KERNEL);
1116 		if (!mgmt_b) {
1117 			ql_log(ql_log_warn, vha, 0x7042,
1118 			    "DMA alloc failed for mgmt_b.\n");
1119 			rval = -ENOMEM;
1120 			goto done_unmap_sg;
1121 		}
1122 
1123 		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1124 			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1125 
1126 		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1127 		mn->parameter1 =
1128 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1129 		break;
1130 
1131 	case QLA84_MGMT_CHNG_CONFIG:
1132 		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1133 		mn->parameter1 =
1134 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1135 
1136 		mn->parameter2 =
1137 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1138 
1139 		mn->parameter3 =
1140 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1141 		break;
1142 
1143 	default:
1144 		rval = -EIO;
1145 		goto exit_mgmt;
1146 	}
1147 
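	/* READ_MEM, WRITE_MEM and GET_INFO all move data through mgmt_b, so
	 * they need the single data segment descriptor filled in;
	 * CHNG_CONFIG passes everything in the IOCB parameters themselves. */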
1148 	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1149 		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1150 		mn->dseg_count = cpu_to_le16(1);
1151 		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1152 		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1153 		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1154 	}
1155 
1156 	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1157 
1158 	if (rval) {
1159 		ql_log(ql_log_warn, vha, 0x7043,
1160 		    "Vendor request 84xx mgmt failed.\n");
1161 
1162 		rval = bsg_job->reply->reply_payload_rcv_len = 0;
1163 		bsg_job->reply->result = (DID_ERROR << 16);
1164 
1165 	} else {
1166 		ql_dbg(ql_dbg_user, vha, 0x7044,
1167 		    "Vendor request 84xx mgmt completed.\n");
1168 
1169 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1170 		bsg_job->reply->result = DID_OK;
1171 
1172 		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1173 			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1174 			bsg_job->reply->reply_payload_rcv_len =
1175 				bsg_job->reply_payload.payload_len;
1176 
1177 			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1178 				bsg_job->reply_payload.sg_cnt, mgmt_b,
1179 				data_len);
1180 		}
1181 	}
1182 
1183 	bsg_job->job_done(bsg_job);
1184 
1185 done_unmap_sg:
1186 	if (mgmt_b)
1187 		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1188 
1189 	if (dma_direction == DMA_TO_DEVICE)
1190 		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1191 			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1192 	else if (dma_direction == DMA_FROM_DEVICE)
1193 		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1194 			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1195 
1196 exit_mgmt:
1197 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1198 
1199 	return rval;
1200 }
1201 
1202 static int
1203 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1204 {
1205 	struct Scsi_Host *host = bsg_job->shost;
1206 	scsi_qla_host_t *vha = shost_priv(host);
1207 	int rval = 0, found = 0;
1208 	struct qla_port_param *port_param = NULL;
1209 	fc_port_t *fcport = NULL;
1210 	uint16_t mb[MAILBOX_REGISTER_COUNT];
1211 	uint8_t *rsp_ptr = NULL;
1212 
1213 	bsg_job->reply->reply_payload_rcv_len = 0;
1214 
1215 	if (!IS_IIDMA_CAPABLE(vha->hw)) {
1216 		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1217 		return -EINVAL;
1218 	}
1219 
1220 	port_param = (struct qla_port_param *)((char *)bsg_job->request +
1221 		sizeof(struct fc_bsg_request));
1222 	if (!port_param) {
1223 		ql_log(ql_log_warn, vha, 0x7047,
1224 		    "port_param header not provided.\n");
1225 		return -EINVAL;
1226 	}
1227 
1228 	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1229 		ql_log(ql_log_warn, vha, 0x7048,
1230 		    "Invalid destination type.\n");
1231 		return -EINVAL;
1232 	}
1233 
1234 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1235 		if (fcport->port_type != FCT_TARGET)
1236 			continue;
1237 
1238 		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1239 			fcport->port_name, sizeof(fcport->port_name)))
1240 			continue;
1241 		found = 1;
		break;
1242 	}
1243 
1244 	if (!found) {
1245 		ql_log(ql_log_warn, vha, 0x7049,
1246 		    "Failed to find port.\n");
1247 		return -EINVAL;
1248 	}
1249 
1250 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
1251 		ql_log(ql_log_warn, vha, 0x704a,
1252 		    "Port is not online.\n");
1253 		return -EINVAL;
1254 	}
1255 
1256 	if (fcport->flags & FCF_LOGIN_NEEDED) {
1257 		ql_log(ql_log_warn, vha, 0x704b,
1258 		    "Remote port not logged in, flags = 0x%x.\n", fcport->flags);
1259 		return -EINVAL;
1260 	}
1261 
1262 	if (port_param->mode)
1263 		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1264 			port_param->speed, mb);
1265 	else
1266 		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1267 			&port_param->speed, mb);
1268 
1269 	if (rval) {
1270 		ql_log(ql_log_warn, vha, 0x704c,
1271 		    "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1272 		    "%04x %x %04x %04x.\n", fcport->port_name[0],
1273 		    fcport->port_name[1], fcport->port_name[2],
1274 		    fcport->port_name[3], fcport->port_name[4],
1275 		    fcport->port_name[5], fcport->port_name[6],
1276 		    fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1277 		rval = 0;
1278 		bsg_job->reply->result = (DID_ERROR << 16);
1279 
1280 	} else {
1281 		if (!port_param->mode) {
1282 			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1283 				sizeof(struct qla_port_param);
1284 
1285 			rsp_ptr = ((uint8_t *)bsg_job->reply) +
1286 				sizeof(struct fc_bsg_reply);
1287 
1288 			memcpy(rsp_ptr, port_param,
1289 				sizeof(struct qla_port_param));
1290 		}
1291 
1292 		bsg_job->reply->result = DID_OK;
1293 	}
1294 
1295 	bsg_job->job_done(bsg_job);
1296 	return rval;
1297 }
1298 
1299 static int
1300 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1301 	uint8_t is_update)
1302 {
1303 	uint32_t start = 0;
1304 	int valid = 0;
1305 	struct qla_hw_data *ha = vha->hw;
1306 
1307 	bsg_job->reply->reply_payload_rcv_len = 0;
1308 
1309 	if (unlikely(pci_channel_offline(ha->pdev)))
1310 		return -EINVAL;
1311 
1312 	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1313 	if (start > ha->optrom_size) {
1314 		ql_log(ql_log_warn, vha, 0x7055,
1315 		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
1316 		return -EINVAL;
1317 	}
1318 
1319 	if (ha->optrom_state != QLA_SWAITING) {
1320 		ql_log(ql_log_info, vha, 0x7056,
1321 		    "optrom_state %d.\n", ha->optrom_state);
1322 		return -EBUSY;
1323 	}
1324 
1325 	ha->optrom_region_start = start;
1326 	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1327 	if (is_update) {
1328 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1329 			valid = 1;
1330 		else if (start == (ha->flt_region_boot * 4) ||
1331 		    start == (ha->flt_region_fw * 4))
1332 			valid = 1;
1333 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1334 		    IS_QLA8XXX_TYPE(ha))
1335 			valid = 1;
1336 		if (!valid) {
1337 			ql_log(ql_log_warn, vha, 0x7058,
1338 			    "Invalid start region 0x%x/0x%x.\n", start,
1339 			    bsg_job->request_payload.payload_len);
1340 			return -EINVAL;
1341 		}
1342 
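		/* Clamp the region so start + size never runs past the end
		 * of the option ROM; e.g. start 0x1000 and a 0x3000-byte
		 * payload against a 0x2000-byte flash yields a 0x1000-byte
		 * window. */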
1343 		ha->optrom_region_size = start +
1344 		    bsg_job->request_payload.payload_len > ha->optrom_size ?
1345 		    ha->optrom_size - start :
1346 		    bsg_job->request_payload.payload_len;
1347 		ha->optrom_state = QLA_SWRITING;
1348 	} else {
1349 		ha->optrom_region_size = start +
1350 		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
1351 		    ha->optrom_size - start :
1352 		    bsg_job->reply_payload.payload_len;
1353 		ha->optrom_state = QLA_SREADING;
1354 	}
1355 
1356 	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1357 	if (!ha->optrom_buffer) {
1358 		ql_log(ql_log_warn, vha, 0x7059,
1359 		    "Unable to allocate memory for optrom retrieval "
1360 		    "(%x)\n", ha->optrom_region_size);
1361 
1362 		ha->optrom_state = QLA_SWAITING;
1363 		return -ENOMEM;
1364 	}
1365 
1366 	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1367 	return 0;
1368 }
1369 
1370 static int
1371 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1372 {
1373 	struct Scsi_Host *host = bsg_job->shost;
1374 	scsi_qla_host_t *vha = shost_priv(host);
1375 	struct qla_hw_data *ha = vha->hw;
1376 	int rval = 0;
1377 
1378 	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1379 	if (rval)
1380 		return rval;
1381 
1382 	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1383 	    ha->optrom_region_start, ha->optrom_region_size);
1384 
1385 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1386 	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1387 	    ha->optrom_region_size);
1388 
1389 	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1390 	bsg_job->reply->result = DID_OK;
1391 	vfree(ha->optrom_buffer);
1392 	ha->optrom_buffer = NULL;
1393 	ha->optrom_state = QLA_SWAITING;
1394 	bsg_job->job_done(bsg_job);
1395 	return rval;
1396 }
1397 
1398 static int
1399 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1400 {
1401 	struct Scsi_Host *host = bsg_job->shost;
1402 	scsi_qla_host_t *vha = shost_priv(host);
1403 	struct qla_hw_data *ha = vha->hw;
1404 	int rval = 0;
1405 
1406 	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1407 	if (rval)
1408 		return rval;
1409 
1410 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1411 	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1412 	    ha->optrom_region_size);
1413 
1414 	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1415 	    ha->optrom_region_start, ha->optrom_region_size);
1416 
1417 	bsg_job->reply->result = DID_OK;
1418 	vfree(ha->optrom_buffer);
1419 	ha->optrom_buffer = NULL;
1420 	ha->optrom_state = QLA_SWAITING;
1421 	bsg_job->job_done(bsg_job);
1422 	return rval;
1423 }
1424 
1425 static int
1426 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1427 {
1428 	struct Scsi_Host *host = bsg_job->shost;
1429 	scsi_qla_host_t *vha = shost_priv(host);
1430 	struct qla_hw_data *ha = vha->hw;
1431 	int rval = 0;
1432 	uint8_t bsg[DMA_POOL_SIZE];
1433 	struct qla_image_version_list *list = (void *)bsg;
1434 	struct qla_image_version *image;
1435 	uint32_t count;
1436 	dma_addr_t sfp_dma;
1437 	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1438 	if (!sfp) {
1439 		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1440 		    EXT_STATUS_NO_MEMORY;
1441 		goto done;
1442 	}
1443 
1444 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1445 	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1446 
1447 	image = list->version;
1448 	count = list->count;
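	/* Program each image-version record in turn; every field_info block
	 * is bounced through the DMA-able sfp buffer and written with the
	 * qla2x00_write_sfp() mailbox helper. */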
1449 	while (count--) {
1450 		memcpy(sfp, &image->field_info, sizeof(image->field_info));
1451 		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1452 		    image->field_address.device, image->field_address.offset,
1453 		    sizeof(image->field_info), image->field_address.option);
1454 		if (rval) {
1455 			bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1456 			    EXT_STATUS_MAILBOX;
1457 			goto dealloc;
1458 		}
1459 		image++;
1460 	}
1461 
1462 	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1463 
1464 dealloc:
1465 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1466 
1467 done:
1468 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1469 	bsg_job->reply->result = DID_OK << 16;
1470 	bsg_job->job_done(bsg_job);
1471 
1472 	return 0;
1473 }
1474 
1475 static int
1476 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1477 {
1478 	struct Scsi_Host *host = bsg_job->shost;
1479 	scsi_qla_host_t *vha = shost_priv(host);
1480 	struct qla_hw_data *ha = vha->hw;
1481 	int rval = 0;
1482 	uint8_t bsg[DMA_POOL_SIZE];
1483 	struct qla_status_reg *sr = (void *)bsg;
1484 	dma_addr_t sfp_dma;
1485 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1486 	if (!sfp) {
1487 		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1488 		    EXT_STATUS_NO_MEMORY;
1489 		goto done;
1490 	}
1491 
1492 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1493 	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1494 
1495 	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1496 	    sr->field_address.device, sr->field_address.offset,
1497 	    sizeof(sr->status_reg), sr->field_address.option);
1498 	sr->status_reg = *sfp;
1499 
1500 	if (rval) {
1501 		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1502 		    EXT_STATUS_MAILBOX;
1503 		goto dealloc;
1504 	}
1505 
1506 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1507 	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1508 
1509 	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1510 
1511 dealloc:
1512 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1513 
1514 done:
1515 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1516 	bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1517 	bsg_job->reply->result = DID_OK << 16;
1518 	bsg_job->job_done(bsg_job);
1519 
1520 	return 0;
1521 }
1522 
1523 static int
1524 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1525 {
1526 	struct Scsi_Host *host = bsg_job->shost;
1527 	scsi_qla_host_t *vha = shost_priv(host);
1528 	struct qla_hw_data *ha = vha->hw;
1529 	int rval = 0;
1530 	uint8_t bsg[DMA_POOL_SIZE];
1531 	struct qla_status_reg *sr = (void *)bsg;
1532 	dma_addr_t sfp_dma;
1533 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1534 	if (!sfp) {
1535 		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1536 		    EXT_STATUS_NO_MEMORY;
1537 		goto done;
1538 	}
1539 
1540 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1541 	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1542 
1543 	*sfp = sr->status_reg;
1544 	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1545 	    sr->field_address.device, sr->field_address.offset,
1546 	    sizeof(sr->status_reg), sr->field_address.option);
1547 
1548 	if (rval) {
1549 		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1550 		    EXT_STATUS_MAILBOX;
1551 		goto dealloc;
1552 	}
1553 
1554 	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1555 
1556 dealloc:
1557 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1558 
1559 done:
1560 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1561 	bsg_job->reply->result = DID_OK << 16;
1562 	bsg_job->job_done(bsg_job);
1563 
1564 	return 0;
1565 }
1566 
1567 static int
1568 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1569 {
1570 	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1571 	case QL_VND_LOOPBACK:
1572 		return qla2x00_process_loopback(bsg_job);
1573 
1574 	case QL_VND_A84_RESET:
1575 		return qla84xx_reset(bsg_job);
1576 
1577 	case QL_VND_A84_UPDATE_FW:
1578 		return qla84xx_updatefw(bsg_job);
1579 
1580 	case QL_VND_A84_MGMT_CMD:
1581 		return qla84xx_mgmt_cmd(bsg_job);
1582 
1583 	case QL_VND_IIDMA:
1584 		return qla24xx_iidma(bsg_job);
1585 
1586 	case QL_VND_FCP_PRIO_CFG_CMD:
1587 		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1588 
1589 	case QL_VND_READ_FLASH:
1590 		return qla2x00_read_optrom(bsg_job);
1591 
1592 	case QL_VND_UPDATE_FLASH:
1593 		return qla2x00_update_optrom(bsg_job);
1594 
1595 	case QL_VND_SET_FRU_VERSION:
1596 		return qla2x00_update_fru_versions(bsg_job);
1597 
1598 	case QL_VND_READ_FRU_STATUS:
1599 		return qla2x00_read_fru_status(bsg_job);
1600 
1601 	case QL_VND_WRITE_FRU_STATUS:
1602 		return qla2x00_write_fru_status(bsg_job);
1603 
1604 	default:
1605 		bsg_job->reply->result = (DID_ERROR << 16);
1606 		bsg_job->job_done(bsg_job);
1607 		return -ENOSYS;
1608 	}
1609 }
1610 
1611 int
1612 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1613 {
1614 	int ret = -EINVAL;
1615 	struct fc_rport *rport;
1616 	fc_port_t *fcport = NULL;
1617 	struct Scsi_Host *host;
1618 	scsi_qla_host_t *vha;
1619 
1620 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1621 		rport = bsg_job->rport;
1622 		fcport = *(fc_port_t **) rport->dd_data;
1623 		host = rport_to_shost(rport);
1624 		vha = shost_priv(host);
1625 	} else {
1626 		host = bsg_job->shost;
1627 		vha = shost_priv(host);
1628 	}
1629 
1630 	if (qla2x00_reset_active(vha)) {
1631 		ql_dbg(ql_dbg_user, vha, 0x709f,
1632 		    "BSG: ISP abort active/needed -- cmd=%d.\n",
1633 		    bsg_job->request->msgcode);
1634 		bsg_job->reply->result = (DID_ERROR << 16);
1635 		bsg_job->job_done(bsg_job);
1636 		return -EBUSY;
1637 	}
1638 
1639 	ql_dbg(ql_dbg_user, vha, 0x7000,
1640 	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
1641 
1642 	switch (bsg_job->request->msgcode) {
1643 	case FC_BSG_RPT_ELS:
1644 	case FC_BSG_HST_ELS_NOLOGIN:
1645 		ret = qla2x00_process_els(bsg_job);
1646 		break;
1647 	case FC_BSG_HST_CT:
1648 		ret = qla2x00_process_ct(bsg_job);
1649 		break;
1650 	case FC_BSG_HST_VENDOR:
1651 		ret = qla2x00_process_vendor_specific(bsg_job);
1652 		break;
1653 	case FC_BSG_HST_ADD_RPORT:
1654 	case FC_BSG_HST_DEL_RPORT:
1655 	case FC_BSG_RPT_CT:
1656 	default:
1657 		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1658 		break;
1659 	}
1660 	return ret;
1661 }
1662 
1663 int
1664 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1665 {
1666 	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1667 	struct qla_hw_data *ha = vha->hw;
1668 	srb_t *sp;
1669 	int cnt, que;
1670 	unsigned long flags;
1671 	struct req_que *req;
1672 	struct srb_ctx *sp_bsg;
1673 
1674 	/* find the bsg job from the active list of commands */
1675 	spin_lock_irqsave(&ha->hardware_lock, flags);
1676 	for (que = 0; que < ha->max_req_queues; que++) {
1677 		req = ha->req_q_map[que];
1678 		if (!req)
1679 			continue;
1680 
1681 		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1682 			sp = req->outstanding_cmds[cnt];
1683 			if (sp) {
1684 				sp_bsg = sp->ctx;
1685 
1686 				if (((sp_bsg->type == SRB_CT_CMD) ||
1687 					(sp_bsg->type == SRB_ELS_CMD_HST))
1688 					&& (sp_bsg->u.bsg_job == bsg_job)) {
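					/* abort_command() issues a mailbox
					 * command and can sleep, so drop the
					 * hardware lock around it and retake
					 * it before leaving the scan. */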
1689 					spin_unlock_irqrestore(&ha->hardware_lock, flags);
1690 					if (ha->isp_ops->abort_command(sp)) {
1691 						ql_log(ql_log_warn, vha, 0x7089,
1692 						    "mbx abort_command "
1693 						    "failed.\n");
1694 						bsg_job->req->errors =
1695 						bsg_job->reply->result = -EIO;
1696 					} else {
1697 						ql_dbg(ql_dbg_user, vha, 0x708a,
1698 						    "mbx abort_command "
1699 						    "success.\n");
1700 						bsg_job->req->errors =
1701 						bsg_job->reply->result = 0;
1702 					}
1703 					spin_lock_irqsave(&ha->hardware_lock, flags);
1704 					goto done;
1705 				}
1706 			}
1707 		}
1708 	}
1709 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1710 	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1711 	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1712 	return 0;
1713 
1714 done:
1715 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1716 	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1717 		kfree(sp->fcport);
1718 	kfree(sp->ctx);
1719 	mempool_free(sp, ha->srb_mempool);
1720 	return 0;
1721 }
1722