// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

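/* Deferred-work handler: release an fcport outside of interrupt context. */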
static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
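/* sp->done callback: report the result to the bsg layer and free the srb. */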
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}

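/*
 * sp->free callback: unmap the request/reply scatterlists, schedule the
 * dummy fcport (CT, FX00 and host-based ELS commands) for deferred
 * release, and return the srb to its pool.
 */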
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

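/*
 * Validate an FCP priority configuration image: check for the "HQOS"
 * header and, when flag is set, that at least one entry carries a valid
 * tag. Returns 1 if the data is usable, 0 otherwise.
 */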
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

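/*
 * Vendor command handler for FCP priority configuration: enable, disable,
 * get, or set the per-port FCP priority data.
 */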
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, then
			 * fcp_prio_cfg is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

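/*
 * Pass an ELS frame through to the fabric: either on behalf of an
 * existing rport (FC_BSG_RPT_ELS), or host-based via a temporary dummy
 * fcport (FC_BSG_HST_ELS_NOLOGIN).
 */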
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For
		 * host-based ELS commands there is no fcport already
		 * allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only free the dummy fcport allocated for host-based ELS;
	 * an rport's fcport is owned by the FC transport.
	 */
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

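/* Compute how many IOCBs a CT pass-through with this many DSDs requires. */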
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

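/*
 * Pass a CT command through to the fabric (name server or management
 * server), using a temporary dummy fcport keyed off the CT preamble.
 */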
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host-based CT commands there is
	 * no fcport already allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

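/*
 * Vendor loopback/echo diagnostic: map the request and reply buffers,
 * pick the ECHO or LOOPBACK mailbox test (configuring 81xx/83xx/8044 port
 * loopback modes as needed), and append the firmware mailbox status and
 * the command sent to the bsg reply.
 */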
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

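/* Vendor command: reset the ISP84xx chip, optionally into diagnostic firmware. */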
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

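/*
 * Vendor command: download a firmware image to the ISP84xx via a
 * VERIFY_CHIP IOCB built around the caller-supplied payload.
 */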
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

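/*
 * Vendor command: ISP84xx management access (read/write memory, get info,
 * change config) via an ACCESS_CHIP IOCB.
 */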
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

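/*
 * Vendor command: get or set the iiDMA speed for a logged-in target port
 * identified by WWPN.
 */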
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

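/*
 * Common setup for option ROM read/update: validate the requested region,
 * set the optrom state, and allocate the staging buffer. Called with
 * ha->optrom_mutex held.
 */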
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

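/* Vendor command: read a region of the option ROM into the reply payload. */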
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

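/* Vendor command: flash a region of the option ROM from the request payload. */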
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

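/*
 * Vendor command: write a list of FRU image version fields to the
 * serial EEPROM via the SFP mailbox interface.
 */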
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

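/* Vendor command: read a FRU status register via the SFP mailbox interface. */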
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

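/* Vendor command: write a FRU status register via the SFP mailbox interface. */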
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

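/* Vendor command: write a buffer to an I2C device behind the adapter. */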
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

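/* Vendor command: read from an I2C device behind the adapter into the reply. */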
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

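/*
 * Vendor command: bidirectional diagnostic IOCB. Requires a switch-attached
 * P2P (F-port) topology and a prior self-login; the vendor reply carries
 * the extended status while the bsg result is always DID_OK.
 */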
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;
	}
	mutex_unlock(&ha->selflogin_lock);

	/* Assign the self login loop id to fcport */
	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts [request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate sp with the bidirectional command context */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* The bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:
	/*
	 * Return an error vendor specific response
	 * and complete the bsg request.
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

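/*
 * Service a QL_VND_FX00_MGMT_CMD vendor request on ISPFx00 adapters: map
 * the request/reply payloads for DMA as directed by the IOCB flags,
 * allocate a dummy fcport and an SRB of type SRB_FXIOCB_BCMD, and fire
 * the command with qla2x00_start_sp().  Completion and cleanup happen in
 * qla2x00_bsg_job_done()/qla2x00_bsg_sp_free().
 */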
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg returned %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/*
	 * Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port-specific information
	 * from the fcport structure.  For host-based ELS commands no fcport
	 * structure is allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}

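/*
 * Read or write a single SerDes register on ISP26xx parts.  The register
 * address, value and sub-command arrive in a struct qla_serdes_reg copied
 * from the request payload; a read copies the updated structure back into
 * the reply payload.
 */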
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

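/*
 * ISP8044 variant of the SerDes register accessor.  Identical flow to
 * qla26xx_serdes_op() but uses the extended struct qla_serdes_reg_ex and
 * the qla8044_{read,write}_serdes_word() helpers.
 */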
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

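/*
 * Report the flash-update capabilities of ISP27xx/28xx adapters by packing
 * the firmware attribute words into a 64-bit capabilities field and copying
 * the result into the reply payload.
 */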
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

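/*
 * Validate a caller-supplied flash-update capability set: the capabilities
 * must match what the online firmware reports, and the requested outage
 * duration must not be below MAX_LOOP_TIMEOUT; otherwise the request is
 * rejected with EXT_STATUS_INVALID_PARAM.
 */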
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

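/*
 * Return buffer-to-buffer credit recovery (BBCR) state.  If BBCR is
 * enabled, the adapter-ID mailbox command is issued; a failure marks the
 * state unknown/offline, otherwise the negotiated and configured BBSCN
 * values are decoded from vha->bbcr and copied to the reply payload.
 */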
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

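/*
 * Fetch ISP link statistics into a DMA-coherent buffer and return them to
 * the caller.  QL_VND_GET_PRIV_STATS_EX additionally passes an options word
 * from vendor_cmd[1] through to qla24xx_get_isp_stats().
 */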
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}

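/*
 * Run D_Port diagnostics on ISP83xx/27xx/28xx adapters: the options and
 * result buffer travel in a struct qla_dport_diag, which is copied back to
 * the reply payload when the mailbox command succeeds.
 */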
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

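/*
 * Report which flash image regions are currently active.  All supported
 * adapters return the active global (firmware) image; ISP28xx additionally
 * reports the active auxiliary regions (board config, VPD/NVRAM and the
 * two NPIV config areas).
 */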
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

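/*
 * Dispatch FC_BSG_HST_VENDOR requests by the QL_VND_* sub-command in
 * vendor_cmd[0].  Unrecognized sub-commands return -ENOSYS.
 */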
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	default:
		return -ENOSYS;
	}
}

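/*
 * Entry point for all bsg requests.  Resolve the owning host (via the
 * rport for FC_BSG_RPT_ELS), refuse work while the chip is down, then
 * route the job to the ELS, CT or vendor-specific handler.
 */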
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data is transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

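/*
 * Timeout handler for bsg requests.  Walk every request queue's
 * outstanding commands for the SRB owning this job and, if found, ask the
 * firmware to abort it; the hardware lock is dropped around the mailbox
 * call.  -ENXIO is reported if no matching SRB exists.
 */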
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* Find the bsg job from the active list of commands. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
				    (sp->type == SRB_ELS_CMD_HST) ||
				    (sp->type == SRB_FXIOCB_BCMD))
				    && (sp->u.bsg_job == bsg_job)) {
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command failed.\n");
						bsg_reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command success.\n");
						bsg_reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(sp);
	return 0;
}