Lines Matching defs:io_req

11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
21 struct qedf_ioreq *io_req =
26 fcport = io_req->fcport;
27 if (io_req->fcport == NULL) {
34 switch (io_req->cmd_type) {
39 io_req->xid);
44 io_req->xid);
46 qedf_initiate_cleanup(io_req, true);
47 complete(&io_req->abts_done);
54 kref_put(&io_req->refcount, qedf_release_cmd);
57 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
69 io_req->xid);
73 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
75 kref_get(&io_req->refcount);
82 io_req->xid);
83 qedf_initiate_cleanup(io_req, true);
84 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
86 if (io_req->cb_func && io_req->cb_arg) {
87 io_req->cb_func(io_req->cb_arg);
88 io_req->cb_arg = NULL;
90 kref_put(&io_req->refcount, qedf_release_cmd);
94 "xid=0x%x.\n", io_req->xid);
95 qedf_initiate_cleanup(io_req, true);
96 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
97 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
101 "Hit default case, xid=0x%x.\n", io_req->xid);
115 struct qedf_ioreq *io_req;
147 io_req = &cmgr->cmds[i];
148 kfree(io_req->sgl_task_params);
149 kfree(io_req->task_params);
151 if (io_req->sense_buffer)
153 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
154 io_req->sense_buffer_dma);
155 cancel_delayed_work_sync(&io_req->rrq_work);
164 struct qedf_ioreq *io_req =
167 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
168 qedf_send_rrq(io_req);
176 struct qedf_ioreq *io_req;
215 io_req = &cmgr->cmds[i];
216 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
218 io_req->xid = xid++;
220 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
223 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
224 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
226 if (!io_req->sense_buffer) {
233 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
235 if (!io_req->task_params) {
246 io_req->sgl_task_params = kzalloc(
248 if (!io_req->sgl_task_params) {
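Source lines 147-155 (teardown) and 215-252 (setup) show the command manager giving each io_req its work items, an xid, a coherent DMA sense buffer, and kzalloc'd task/SGL parameter blocks, with everything unwound again in the free path. A sketch of the coherent sense-buffer half, assuming a generic PCI device and a hypothetical size constant in place of QEDF_SCSI_SENSE_BUFFERSIZE:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    #define MY_SENSE_BUFSIZE 96    /* hypothetical stand-in */

    struct my_cmd {
            void *sense_buffer;
            dma_addr_t sense_buffer_dma;
    };

    static int my_cmd_alloc_sense(struct pci_dev *pdev, struct my_cmd *cmd)
    {
            /* CPU-visible buffer plus the bus address the HBA DMAs to. */
            cmd->sense_buffer = dma_alloc_coherent(&pdev->dev,
                                                   MY_SENSE_BUFSIZE,
                                                   &cmd->sense_buffer_dma,
                                                   GFP_KERNEL);
            return cmd->sense_buffer ? 0 : -ENOMEM;
    }

    static void my_cmd_free_sense(struct pci_dev *pdev, struct my_cmd *cmd)
    {
            if (cmd->sense_buffer)
                    dma_free_coherent(&pdev->dev, MY_SENSE_BUFSIZE,
                                      cmd->sense_buffer,
                                      cmd->sense_buffer_dma);
    }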
302 struct qedf_ioreq *io_req = NULL;
337 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
343 if (!io_req->alloc)
352 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
354 "io_req found to be dirty ox_id = 0x%x.\n",
355 io_req->xid);
358 io_req->flags = 0;
359 io_req->alloc = 1;
364 xid = io_req->xid;
367 io_req->cmd_mgr = cmd_mgr;
368 io_req->fcport = fcport;
371 io_req->sc_cmd = NULL;
372 io_req->lun = -1;
374 /* Hold the io_req against deletion */
375 kref_init(&io_req->refcount); /* ID: 001 */
376 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
378 /* Bind io_bdt for this io_req */
379 /* Have a static link between io_req and io_bdt_pool */
380 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
383 kref_put(&io_req->refcount, qedf_release_cmd);
386 bd_tbl->io_req = io_req;
387 io_req->cmd_type = cmd_type;
388 io_req->tm_flags = 0;
391 io_req->rx_buf_off = 0;
392 io_req->tx_buf_off = 0;
393 io_req->rx_id = 0xffff; /* No OX_ID */
395 return io_req;
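qedf_alloc_cmd() (source lines 302-395) hands out commands from a fixed array: it scans for a slot whose alloc flag is clear, warns if the slot is still marked dirty, takes the base reference with kref_init() (the /* ID: 001 */ annotation pairs with the kref_put in qedf_scsi_done() at source line 1425), and binds the preallocated BD table for that xid. A sketch of the scan-and-kref idiom, again with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/spinlock.h>

    struct my_cmd {
            struct kref refcount;
            bool alloc;
    };

    struct my_cmd_mgr {
            spinlock_t lock;
            unsigned int idx, num_cmds;
            struct my_cmd *cmds;
    };

    static void my_release_cmd(struct kref *ref)
    {
            struct my_cmd *cmd = container_of(ref, struct my_cmd, refcount);

            cmd->alloc = false;    /* slot may be handed out again */
    }

    static struct my_cmd *my_alloc_cmd(struct my_cmd_mgr *mgr)
    {
            struct my_cmd *cmd = NULL;
            unsigned int i;

            spin_lock(&mgr->lock);
            for (i = 0; i < mgr->num_cmds; i++) {
                    cmd = &mgr->cmds[mgr->idx];
                    mgr->idx = (mgr->idx + 1) % mgr->num_cmds;
                    if (!cmd->alloc) {
                            cmd->alloc = true;
                            /* base reference, dropped at completion */
                            kref_init(&cmd->refcount);
                            break;
                    }
                    cmd = NULL;
            }
            spin_unlock(&mgr->lock);
            return cmd;
    }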
403 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
405 struct qedf_mp_req *mp_req = &(io_req->mp_req);
406 struct qedf_ctx *qedf = io_req->fcport->qedf;
434 struct qedf_ioreq *io_req =
436 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
437 struct qedf_rport *fcport = io_req->fcport;
440 if (io_req->cmd_type == QEDF_SCSI_CMD) {
442 "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
443 io_req, io_req->xid);
444 WARN_ON(io_req->sc_cmd);
447 if (io_req->cmd_type == QEDF_ELS ||
448 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
449 qedf_free_mp_resc(io_req);
453 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
460 io_req->task_retry_identifier++;
461 io_req->fcport = NULL;
463 clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
464 io_req->cpu = 0;
466 io_req->fcport = NULL;
467 io_req->alloc = 0;
471 static int qedf_map_sg(struct qedf_ioreq *io_req)
473 struct scsi_cmnd *sc = io_req->sc_cmd;
477 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
490 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
492 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
493 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
504 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
506 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
517 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
518 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
523 scsi_bufflen(sc), io_req->xid);
528 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
530 struct scsi_cmnd *sc = io_req->sc_cmd;
531 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
535 bd_count = qedf_map_sg(io_req);
543 io_req->bd_tbl->bd_valid = bd_count;
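qedf_map_sg() and qedf_build_bd_list_from_sg() (source lines 471-543) DMA-map the command's scatterlist into the firmware BD table and classify the request as fast or slow SGE (small transfers and reads stay fast; short mid-list entries force the slow path). A sketch of the scatterlist walk, with a hypothetical BD layout in place of struct scsi_sge:

    #include <scsi/scsi_cmnd.h>
    #include <linux/scatterlist.h>

    struct my_bd {          /* hypothetical stand-in for struct scsi_sge */
            u64 addr;
            u32 len;
    };

    static int my_map_sg(struct scsi_cmnd *sc, struct my_bd *bd_tbl)
    {
            struct scatterlist *sg;
            int sg_count, i;

            sg_count = scsi_dma_map(sc);    /* number of mapped entries */
            if (sg_count <= 0)
                    return sg_count;        /* no data, or mapping failed */

            /* Copy each mapped segment into the firmware descriptor. */
            scsi_for_each_sg(sc, sg, sg_count, i) {
                    bd_tbl[i].addr = sg_dma_address(sg);
                    bd_tbl[i].len = sg_dma_len(sg);
            }
            return sg_count;        /* stored as bd_tbl->bd_valid above */
    }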
548 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
551 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
557 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD)
558 int_to_scsilun(io_req->tm_lun,
566 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
567 fcp_cmnd->fc_flags = io_req->io_req_flags;
571 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
583 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
587 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
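qedf_build_fcp_cmnd() (source lines 548-587) fills the FCP_CMND payload: the LUN via int_to_scsilun() (the TM LUN for task-management requests), the task-management and read/write flag bytes, the CDB for regular commands only, and the big-endian FCP_DL data length. A sketch against the generic struct fcp_cmnd from <scsi/fc/fc_fcp.h>:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_common.h>
    #include <scsi/fc/fc_fcp.h>

    static void my_build_fcp_cmnd(struct scsi_cmnd *sc, struct fcp_cmnd *fcp,
                                  u8 tm_flags, u8 rw_flags, u32 data_len)
    {
            memset(fcp, 0, sizeof(*fcp));

            int_to_scsilun(sc->device->lun, &fcp->fc_lun);
            fcp->fc_tm_flags = tm_flags;   /* e.g. FCP_TMF_LUN_RESET, else 0 */
            fcp->fc_flags = rw_flags;      /* read/write direction bits */
            if (!tm_flags)                 /* TMFs carry no CDB */
                    memcpy(fcp->fc_cdb, sc->cmnd, sc->cmd_len);
            fcp->fc_dl = htonl(data_len);  /* FCP_DL is big-endian on the wire */
    }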
591 struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
595 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
596 struct io_bdt *bd_tbl = io_req->bd_tbl;
608 io_req->task = task_ctx;
610 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
611 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
614 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
619 tx_io_size = io_req->data_xfer_len;
622 rx_io_size = io_req->data_xfer_len;
627 io_req->task_params->context = task_ctx;
628 io_req->task_params->sqe = sqe;
629 io_req->task_params->task_type = task_type;
630 io_req->task_params->tx_io_size = tx_io_size;
631 io_req->task_params->rx_io_size = rx_io_size;
632 io_req->task_params->conn_cid = fcport->fw_cid;
633 io_req->task_params->itid = io_req->xid;
634 io_req->task_params->cq_rss_number = cq_idx;
635 io_req->task_params->is_tape_device = fcport->dev_type;
638 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
640 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
641 io_req->sgl_task_params->sgl_phys_addr.lo =
643 io_req->sgl_task_params->sgl_phys_addr.hi =
645 io_req->sgl_task_params->num_sges = bd_count;
646 io_req->sgl_task_params->total_buffer_size =
647 scsi_bufflen(io_req->sc_cmd);
648 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
649 io_req->sgl_task_params->small_mid_sge = 1;
651 io_req->sgl_task_params->small_mid_sge = 0;
655 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
656 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
659 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
668 init_initiator_rw_fcoe_task(io_req->task_params,
669 io_req->sgl_task_params,
671 io_req->task_retry_identifier, fcp_cmnd);
674 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
680 void qedf_init_mp_task(struct qedf_ioreq *io_req,
683 struct qedf_mp_req *mp_req = &(io_req->mp_req);
684 struct qedf_rport *fcport = io_req->fcport;
685 struct qedf_ctx *qedf = io_req->fcport->qedf;
693 io_req->cmd_type);
702 /* Set up the task from io_req for easy reference */
703 io_req->task = task_ctx;
706 io_req->task_params->context = task_ctx;
707 io_req->task_params->sqe = sqe;
708 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
709 io_req->task_params->tx_io_size = io_req->data_xfer_len;
711 io_req->task_params->rx_io_size = PAGE_SIZE;
712 io_req->task_params->conn_cid = fcport->fw_cid;
713 io_req->task_params->itid = io_req->xid;
715 io_req->task_params->cq_rss_number = 0;
716 io_req->task_params->is_tape_device = fcport->dev_type;
720 fc_hdr->fh_ox_id = io_req->xid;
738 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
755 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
804 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
809 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
816 io_log->task_id = io_req->xid;
828 io_log->refcount = kref_read(&io_req->refcount);
832 io_log->req_cpu = io_req->cpu;
836 io_log->req_cpu = io_req->cpu;
837 io_log->int_cpu = io_req->int_cpu;
841 io_log->sge_type = io_req->sge_type;
850 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
852 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
861 /* Initialize rest of io_req fields */
862 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
863 qedf_priv(sc_cmd)->io_req = io_req;
864 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
867 io_req->cpu = smp_processor_id();
870 io_req->io_req_flags = QEDF_READ;
873 io_req->io_req_flags = QEDF_WRITE;
876 io_req->io_req_flags = 0;
880 xid = io_req->xid;
883 if (qedf_build_bd_list_from_sg(io_req)) {
885 /* Releasing the cmd frees the io_req, but sc_cmd is still assigned */
886 io_req->sc_cmd = NULL;
887 kref_put(&io_req->refcount, qedf_release_cmd);
894 /* Releasing the cmd frees the io_req, but sc_cmd is still assigned */
895 io_req->sc_cmd = NULL;
896 kref_put(&io_req->refcount, qedf_release_cmd);
901 io_req->lun = (int)sc_cmd->device->lun;
913 /* Releasing the cmd frees the io_req, but sc_cmd is still assigned */
914 io_req->sc_cmd = NULL;
915 kref_put(&io_req->refcount, qedf_release_cmd);
919 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
925 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
927 if (qedf_io_tracing && io_req->sc_cmd)
928 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
941 struct qedf_ioreq *io_req;
1030 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1031 if (!io_req) {
1037 io_req->sc_cmd = sc_cmd;
1041 if (qedf_post_io_req(fcport, io_req)) {
1042 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1054 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1057 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1058 struct qedf_ctx *qedf = io_req->fcport->qedf;
1064 io_req->fcp_status = FC_GOOD;
1065 io_req->fcp_resid = 0;
1068 io_req->fcp_resid = fcp_rsp->fcp_resid;
1070 io_req->scsi_comp_flags = rsp_flags;
1071 io_req->cdb_status = fcp_rsp->scsi_status_code;
1081 io_req->fcp_rsp_len = fcp_rsp_len;
1082 io_req->fcp_sns_len = fcp_sns_len;
1083 rsp_info = sense_data = io_req->sense_buffer;
1088 io_req->fcp_rsp_code = rsp_info[3];
1090 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1110 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1112 struct scsi_cmnd *sc = io_req->sc_cmd;
1114 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1117 io_req->bd_tbl->bd_valid = 0;
1122 struct qedf_ioreq *io_req)
1133 if (!io_req)
1138 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1139 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1140 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1142 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1143 io_req->xid);
1147 sc_cmd = io_req->sc_cmd;
1155 if (!qedf_priv(sc_cmd)->io_req) {
1157 "io_req is NULL, returned in another context.\n");
1173 fcport = io_req->fcport;
1184 io_req->xid);
1188 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1190 qedf_unmap_sg_list(qedf, io_req);
1193 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1196 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1197 io_req->fcp_rsp_code);
1207 io_req->xid, fcp_rsp->rsp_flags.flags,
1208 io_req->fcp_resid,
1212 if (io_req->cdb_status == 0)
1213 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1215 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1225 switch (io_req->fcp_status) {
1227 if (io_req->cdb_status == 0) {
1231 refcount = kref_read(&io_req->refcount);
1237 sc_cmd->device->lun, io_req->xid,
1240 io_req->cdb_status, io_req->fcp_resid,
1242 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1244 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1245 io_req->cdb_status == SAM_STAT_BUSY) {
1260 if (io_req->cdb_status ==
1267 if (io_req->fcp_resid)
1268 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1302 io_req->fcp_status);
1308 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1314 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1316 io_req->sc_cmd = NULL;
1317 qedf_priv(sc_cmd)->io_req = NULL;
1319 kref_put(&io_req->refcount, qedf_release_cmd);
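The completion path (source lines 1054-1319) parses the FCP response into fcp_status/cdb_status, residual, and sense lengths, unmaps the scatterlist, composes the midlayer result from a host byte and the SCSI status byte, clears the sc_cmd back-pointers, and drops the base reference. A condensed sketch of that tail:

    #include <scsi/scsi_cmnd.h>

    static void my_complete_scsi_cmd(struct scsi_cmnd *sc, u8 cdb_status,
                                     u32 fcp_resid)
    {
            scsi_dma_unmap(sc);             /* undoes scsi_dma_map() */
            if (fcp_resid)
                    scsi_set_resid(sc, fcp_resid);

            /* host byte in bits 16-23, SCSI status byte in bits 0-7 */
            sc->result = (DID_OK << 16) | cdb_status;

            scsi_done(sc);  /* sc->scsi_done(sc) on kernels before v5.16 */
            /* caller then drops the base kref taken at allocation */
    }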
1323 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1329 if (!io_req) {
1330 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1334 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1336 "io_req:%p scsi_done handling already done\n",
1337 io_req);
1345 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1347 sc_cmd = io_req->sc_cmd;
1359 if (!qedf_priv(sc_cmd)->io_req) {
1361 "io_req is NULL, returned in another context.\n");
1391 qedf_unmap_sg_list(qedf, io_req);
1394 refcount = kref_read(&io_req->refcount);
1411 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1413 io_req->sc_cmd = NULL;
1414 qedf_priv(sc_cmd)->io_req = NULL;
1416 kref_put(&io_req->refcount, qedf_release_cmd);
1421 * Clear the io_req->sc_cmd backpointer so we don't try to process
1424 io_req->sc_cmd = NULL;
1425 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
1433 struct qedf_ioreq *io_req)
1436 struct qedf_rport *fcport = io_req->fcport;
1442 "cqe is NULL for io_req %p xid=0x%x\n",
1443 io_req, io_req->xid);
1447 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1448 "xid=0x%x\n", io_req->xid);
1449 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1453 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1475 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1476 io_req->rx_buf_off =
1478 io_req->tx_buf_off =
1480 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1481 rval = qedf_send_rec(io_req);
1483 * We only want to abort the io_req if we
1495 init_completion(&io_req->abts_done);
1496 rval = qedf_initiate_abts(io_req, true);
1503 struct qedf_ioreq *io_req)
1507 if (io_req == NULL) {
1508 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1512 if (io_req->fcport == NULL) {
1519 "cqe is NULL for io_req %p\n", io_req);
1523 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1524 "xid=0x%x\n", io_req->xid);
1525 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1529 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1536 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1537 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1538 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1541 io_req->xid);
1550 init_completion(&io_req->abts_done);
1551 rval = qedf_initiate_abts(io_req, true);
1589 struct qedf_ioreq *io_req;
1650 io_req = &cmd_mgr->cmds[i];
1652 if (!io_req)
1654 if (!io_req->fcport)
1659 if (io_req->alloc) {
1660 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1661 if (io_req->cmd_type == QEDF_SCSI_CMD)
1664 io_req->xid);
1672 if (io_req->fcport != fcport)
1678 * NULL, and we drop the ref on the io_req to clean it up.
1680 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1681 refcount = kref_read(&io_req->refcount);
1684 io_req->xid, io_req->cmd_type, refcount);
1686 * free the io_req
1688 if (atomic_read(&io_req->state) ==
1691 (&io_req->rrq_work)) {
1694 io_req->xid);
1696 kref_put(&io_req->refcount,
1704 if (io_req->cmd_type == QEDF_ELS &&
1706 rc = kref_get_unless_zero(&io_req->refcount);
1709 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1710 io_req, io_req->xid);
1713 qedf_initiate_cleanup(io_req, false);
1715 qedf_flush_els_req(qedf, io_req);
1724 if (io_req->cmd_type == QEDF_ABTS) {
1726 rc = kref_get_unless_zero(&io_req->refcount);
1729 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1730 io_req, io_req->xid);
1733 if (lun != -1 && io_req->lun != lun)
1737 "Flushing abort xid=0x%x.\n", io_req->xid);
1739 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1742 io_req->xid);
1743 kref_put(&io_req->refcount, qedf_release_cmd);
1746 if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1749 io_req->xid);
1750 qedf_initiate_cleanup(io_req, true);
1754 complete(&io_req->abts_done);
1755 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1757 kref_put(&io_req->refcount, qedf_release_cmd);
1763 if (!io_req->sc_cmd)
1765 if (!io_req->sc_cmd->device) {
1768 io_req->sc_cmd);
1770 io_req->sc_cmd = NULL;
1771 qedf_initiate_cleanup(io_req, false);
1772 kref_put(&io_req->refcount, qedf_release_cmd);
1780 rc = kref_get_unless_zero(&io_req->refcount);
1783 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1788 "Cleanup xid=0x%x.\n", io_req->xid);
1792 qedf_initiate_cleanup(io_req, true);
1795 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
1817 io_req = &cmd_mgr->cmds[i];
1818 if (io_req->fcport &&
1819 io_req->fcport == fcport) {
1821 kref_read(&io_req->refcount);
1823 &io_req->flags);
1825 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1826 io_req, io_req->xid,
1827 io_req->flags,
1828 io_req->sc_cmd,
1830 io_req->cmd_type);
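qedf_flush_active_ios() (source lines 1589-1830) walks the whole command table when a session drops: commands completing concurrently are skipped via kref_get_unless_zero() (a zero refcount means release is already underway), pending rrq/timeout work is canceled with the reference each pending item held dropped, abts_done waiters are completed, and finally the flusher's own reference is put. A sketch of the take-ref-or-skip idiom, reusing the hypothetical shapes from the earlier sketches:

    #include <linux/kref.h>
    #include <linux/workqueue.h>

    struct my_cmd {
            struct kref refcount;
            struct delayed_work rrq_work;
    };

    static void my_release_cmd(struct kref *ref);   /* frees the slot */

    static void my_flush_one(struct my_cmd *cmd)
    {
            /* kref_get_unless_zero() returns 0 once the refcount has
             * hit zero, i.e. the command is already being released on
             * another CPU: skip it rather than resurrect it. */
            if (!kref_get_unless_zero(&cmd->refcount))
                    return;

            /* A pending work item holds a reference; if we cancel the
             * work before it ran, we must drop that reference here. */
            if (cancel_delayed_work_sync(&cmd->rrq_work))
                    kref_put(&cmd->refcount, my_release_cmd);

            /* ... force firmware cleanup, complete abts waiters ... */

            kref_put(&cmd->refcount, my_release_cmd);   /* flush reference */
    }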
1851 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1854 struct qedf_rport *fcport = io_req->fcport;
1908 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1909 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1910 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1912 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1913 io_req->xid, io_req->sc_cmd);
1920 io_req->cmd_type = QEDF_ABTS;
1923 kref_get(&io_req->refcount);
1925 xid = io_req->xid;
1929 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1931 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1932 refcount = kref_read(&io_req->refcount);
1934 "ABTS io_req xid = 0x%x refcount=%d\n",
1937 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1944 io_req->task_params->sqe = sqe;
1946 init_initiator_abort_fcoe_task(io_req->task_params);
1958 struct qedf_ioreq *io_req)
1962 struct qedf_rport *fcport = io_req->fcport;
1965 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1971 * the io_req to be freed from the other context before we got here.
1976 io_req->xid);
1988 io_req->xid);
1992 if (!cancel_delayed_work(&io_req->timeout_work)) {
2001 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2002 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
2006 io_req->xid);
2013 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2015 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2021 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2028 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2030 if (io_req->sc_cmd) {
2031 if (!io_req->return_scsi_cmd_on_abts)
2034 io_req->xid);
2035 if (io_req->return_scsi_cmd_on_abts)
2036 qedf_scsi_done(qedf, io_req, DID_ERROR);
2040 complete(&io_req->abts_done);
2042 kref_put(&io_req->refcount, qedf_release_cmd);
2045 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2050 struct qedf_ctx *qedf = io_req->fcport->qedf;
2056 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2059 if (io_req->cmd_type != QEDF_ELS) {
2061 io_req->data_xfer_len = mp_req->req_len;
2063 mp_req->req_len = io_req->data_xfer_len;
2069 qedf_free_mp_resc(io_req);
2078 qedf_free_mp_resc(io_req);
2088 qedf_free_mp_resc(io_req);
2096 qedf_free_mp_resc(io_req);
2149 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2161 fcport = io_req->fcport;
2179 if (io_req->cmd_type == QEDF_ELS) {
2183 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2184 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2185 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2187 io_req->xid);
2190 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2197 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2201 if (io_req->cmd_type == QEDF_CLEANUP) {
2203 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2204 io_req->xid, io_req->cmd_type);
2205 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2209 refcount = kref_read(&io_req->refcount);
2213 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2218 io_req->cmd_type = QEDF_CLEANUP;
2220 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2222 init_completion(&io_req->cleanup_done);
2229 io_req->task_params->sqe = sqe;
2231 init_initiator_cleanup_fcoe_task(io_req->task_params);
2236 tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2243 "xid=%x.\n", io_req->xid);
2244 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2253 if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2254 io_req->tm_flags == FCP_TMF_TGT_RESET) {
2255 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2256 io_req->sc_cmd = NULL;
2257 kref_put(&io_req->refcount, qedf_release_cmd);
2258 complete(&io_req->tm_done);
2261 if (io_req->sc_cmd) {
2262 if (!io_req->return_scsi_cmd_on_abts)
2265 io_req->xid);
2266 if (io_req->return_scsi_cmd_on_abts)
2267 qedf_scsi_done(qedf, io_req, DID_ERROR);
2271 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2273 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2279 struct qedf_ioreq *io_req)
2282 io_req->xid);
2284 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2287 complete(&io_req->cleanup_done);
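qedf_initiate_cleanup() and qedf_process_cleanup_compl() (source lines 2149-2287) form a request/response rendezvous: the submit path initializes cleanup_done, posts the cleanup task to the firmware, and blocks in wait_for_completion_timeout(); the CQE handler calls complete(). The TMF path below (source lines 2293-2486) uses the identical idiom with tm_done. A sketch of the rendezvous, with a hypothetical 10-second timeout:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct my_cleanup_cmd {
            struct completion cleanup_done;
    };

    static int my_initiate_cleanup(struct my_cleanup_cmd *cmd)
    {
            init_completion(&cmd->cleanup_done);

            /* ... post the cleanup task and ring the doorbell ... */

            if (!wait_for_completion_timeout(&cmd->cleanup_done, 10 * HZ))
                    return -ETIMEDOUT;      /* firmware never answered */
            return 0;
    }

    /* CQE (interrupt) path, mirroring qedf_process_cleanup_compl(): */
    static void my_process_cleanup_compl(struct my_cleanup_cmd *cmd)
    {
            complete(&cmd->cleanup_done);
    }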
2293 struct qedf_ioreq *io_req;
2310 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2311 if (!io_req) {
2322 /* Initialize rest of io_req fields */
2323 io_req->sc_cmd = NULL;
2324 io_req->fcport = fcport;
2325 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2328 io_req->io_req_flags = QEDF_READ;
2329 io_req->data_xfer_len = 0;
2330 io_req->tm_flags = tm_flags;
2333 io_req->return_scsi_cmd_on_abts = false;
2334 io_req->tm_lun = tm_lun;
2337 xid = io_req->xid;
2339 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2345 init_completion(&io_req->tm_done);
2350 io_req->cpu = smp_processor_id();
2356 qedf_init_task(fcport, lport, io_req, task, sqe);
2361 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2362 tmo = wait_for_completion_timeout(&io_req->tm_done,
2369 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2370 io_req->sc_cmd = NULL;
2373 if (io_req->fcp_rsp_code == 0)
2387 /* We do not need this io_req any more */
2388 kref_put(&io_req->refcount, qedf_release_cmd);
2477 struct qedf_ioreq *io_req)
2481 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2484 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2486 complete(&io_req->tm_done);