Lines Matching +full:supports +full:- +full:cqe

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2016-2018 Cavium Inc.
58 static int qedf_default_prio = -1;
81 "supports. (default 0xffffffff)");
106 "during probe (0-3: 0 more verbose).");
125 vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); in qedf_set_vlan_id()
126 qedf->vlan_id = vlan_id_tmp; in qedf_set_vlan_id()
127 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_set_vlan_id()
129 vlan_id_tmp, qedf->prio); in qedf_set_vlan_id()
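
qedf_set_vlan_id() above folds the FCoE priority into the PCP bits of the 802.1Q tag. A minimal standalone sketch of that arithmetic, assuming VLAN_PRIO_SHIFT is 13 as in include/linux/if_vlan.h and using made-up example values:

/* Sketch only: compose an 802.1Q TCI from a VLAN ID and an FCoE priority. */
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13

int main(void)
{
	uint16_t vlan_id = 1002;	/* e.g. VLAN learned via FIP VLAN discovery, 0x3ea */
	uint8_t  prio    = 3;		/* FCoE priority; commonly 3 */
	uint16_t tci     = vlan_id | (prio << VLAN_PRIO_SHIFT);

	printf("tci=0x%04x\n", tci);	/* 0x3ea | 0x6000 = 0x63ea */
	return 0;
}
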
136 while (qedf->fipvlan_retries--) { in qedf_initiate_fipvlan_req()
138 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { in qedf_initiate_fipvlan_req()
139 QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); in qedf_initiate_fipvlan_req()
143 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_initiate_fipvlan_req()
144 QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); in qedf_initiate_fipvlan_req()
148 if (qedf->vlan_id > 0) { in qedf_initiate_fipvlan_req()
149 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_initiate_fipvlan_req()
151 qedf->vlan_id); in qedf_initiate_fipvlan_req()
152 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) in qedf_initiate_fipvlan_req()
153 fcoe_ctlr_link_up(&qedf->ctlr); in qedf_initiate_fipvlan_req()
157 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_initiate_fipvlan_req()
158 "Retry %d.\n", qedf->fipvlan_retries); in qedf_initiate_fipvlan_req()
159 init_completion(&qedf->fipvlan_compl); in qedf_initiate_fipvlan_req()
161 wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); in qedf_initiate_fipvlan_req()
173 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n", in qedf_handle_link_update()
174 atomic_read(&qedf->link_state)); in qedf_handle_link_update()
176 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { in qedf_handle_link_update()
181 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { in qedf_handle_link_update()
182 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_handle_link_update()
184 qedf->vlan_id = 0; in qedf_handle_link_update()
193 QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " in qedf_handle_link_update()
202 eth_zero_addr(qedf->data_src_addr); in qedf_handle_link_update()
203 fcoe_ctlr_link_up(&qedf->ctlr); in qedf_handle_link_update()
204 } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { in qedf_handle_link_update()
210 atomic_set(&qedf->link_down_tmo_valid, 0); in qedf_handle_link_update()
211 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_handle_link_update()
213 fcoe_ctlr_link_down(&qedf->ctlr); in qedf_handle_link_update()
215 QEDF_ERR(&qedf->dbg_ctx, in qedf_handle_link_update()
218 qedf->fipvlan_retries = qedf_fipvlan_retries; in qedf_handle_link_update()
233 granted_mac = fr_cb(fp)->granted_mac; in qedf_set_data_src_addr()
239 * If granted_mac is non-zero, we use that. in qedf_set_data_src_addr()
241 * the sel_fcf->fc_map and the d_id of the FLOGI frame. in qedf_set_data_src_addr()
242 * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the in qedf_set_data_src_addr()
246 ether_addr_copy(qedf->data_src_addr, granted_mac); in qedf_set_data_src_addr()
248 } else if (qedf->ctlr.sel_fcf->fc_map != 0) { in qedf_set_data_src_addr()
249 hton24(fc_map, qedf->ctlr.sel_fcf->fc_map); in qedf_set_data_src_addr()
250 qedf->data_src_addr[0] = fc_map[0]; in qedf_set_data_src_addr()
251 qedf->data_src_addr[1] = fc_map[1]; in qedf_set_data_src_addr()
252 qedf->data_src_addr[2] = fc_map[2]; in qedf_set_data_src_addr()
253 qedf->data_src_addr[3] = fh->fh_d_id[0]; in qedf_set_data_src_addr()
254 qedf->data_src_addr[4] = fh->fh_d_id[1]; in qedf_set_data_src_addr()
255 qedf->data_src_addr[5] = fh->fh_d_id[2]; in qedf_set_data_src_addr()
258 fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id); in qedf_set_data_src_addr()
262 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_set_data_src_addr()
263 "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method); in qedf_set_data_src_addr()
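
The fc_map branch above builds a Fabric Provided MAC Address (FPMA) by concatenating the selected FCF's 24-bit FC-MAP with the 24-bit FC_ID taken from the FLOGI d_id. A standalone sketch with illustrative values (0x0efc00 is the conventional default FC-MAP; the FC_ID is an example):

/* Sketch: build an FPMA from a 24-bit FC-MAP and a 24-bit FC_ID. */
#include <stdint.h>
#include <stdio.h>

static void build_fpma(uint8_t mac[6], uint32_t fc_map, uint32_t fc_id)
{
	mac[0] = (fc_map >> 16) & 0xff;
	mac[1] = (fc_map >> 8) & 0xff;
	mac[2] = fc_map & 0xff;
	mac[3] = (fc_id >> 16) & 0xff;
	mac[4] = (fc_id >> 8) & 0xff;
	mac[5] = fc_id & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	build_fpma(mac, 0x0efc00, 0x010203);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 0e:fc:00:01:02:03 */
	return 0;
}
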
270 struct fc_lport *lport = exch->lp; in qedf_flogi_resp()
283 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, in qedf_flogi_resp()
290 qedf->flogi_failed++; in qedf_flogi_resp()
294 qedf->flogi_pending = 0; in qedf_flogi_resp()
298 complete(&qedf->flogi_compl); in qedf_flogi_resp()
319 qedf->flogi_cnt++; in qedf_elsct_send()
320 if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { in qedf_elsct_send()
321 schedule_delayed_work(&qedf->stag_work, 2); in qedf_elsct_send()
324 qedf->flogi_pending++; in qedf_elsct_send()
337 lport = qedf->lport; in qedf_send_flogi()
339 if (!lport->tt.elsct_send) { in qedf_send_flogi()
340 QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); in qedf_send_flogi()
341 return -EINVAL; in qedf_send_flogi()
346 QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n"); in qedf_send_flogi()
347 return -ENOMEM; in qedf_send_flogi()
350 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, in qedf_send_flogi()
352 lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, in qedf_send_flogi()
353 ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov); in qedf_send_flogi()
355 init_completion(&qedf->flogi_compl); in qedf_send_flogi()
369 struct fc_lport *lport = qedf->lport; in qedf_link_recovery()
378 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_recovery()
385 qedf->ctlr.state = FIP_ST_LINK_WAIT; in qedf_link_recovery()
386 fcoe_ctlr_link_down(&qedf->ctlr); in qedf_link_recovery()
392 fcoe_ctlr_link_up(&qedf->ctlr); in qedf_link_recovery()
395 qedf->fipvlan_retries = qedf_fipvlan_retries; in qedf_link_recovery()
406 if (qedf->ctlr.sel_fcf) { in qedf_link_recovery()
407 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_recovery()
412 retries--; in qedf_link_recovery()
416 QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for " in qedf_link_recovery()
426 i = wait_for_completion_timeout(&qedf->flogi_compl, in qedf_link_recovery()
427 qedf->lport->r_a_tov); in qedf_link_recovery()
429 QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n"); in qedf_link_recovery()
434 * Call lport->tt.rport_login which will cause libfc to send an in qedf_link_recovery()
437 mutex_lock(&lport->disc.disc_mutex); in qedf_link_recovery()
438 list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { in qedf_link_recovery()
439 if (kref_get_unless_zero(&rdata->kref)) { in qedf_link_recovery()
441 kref_put(&rdata->kref, fc_rport_destroy); in qedf_link_recovery()
444 mutex_unlock(&lport->disc.disc_mutex); in qedf_link_recovery()
451 struct fc_lport *lport = qedf->lport; in qedf_update_link_speed()
453 lport->link_speed = FC_PORTSPEED_UNKNOWN; in qedf_update_link_speed()
454 lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; in qedf_update_link_speed()
457 switch (link->speed) { in qedf_update_link_speed()
459 lport->link_speed = FC_PORTSPEED_10GBIT; in qedf_update_link_speed()
462 lport->link_speed = FC_PORTSPEED_25GBIT; in qedf_update_link_speed()
465 lport->link_speed = FC_PORTSPEED_40GBIT; in qedf_update_link_speed()
468 lport->link_speed = FC_PORTSPEED_50GBIT; in qedf_update_link_speed()
471 lport->link_speed = FC_PORTSPEED_100GBIT; in qedf_update_link_speed()
474 lport->link_speed = FC_PORTSPEED_20GBIT; in qedf_update_link_speed()
477 lport->link_speed = FC_PORTSPEED_UNKNOWN; in qedf_update_link_speed()
496 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
497 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; in qedf_update_link_speed()
504 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
505 lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; in qedf_update_link_speed()
513 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
514 lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; in qedf_update_link_speed()
521 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
522 lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; in qedf_update_link_speed()
530 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
531 lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; in qedf_update_link_speed()
536 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
537 lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; in qedf_update_link_speed()
539 fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; in qedf_update_link_speed()
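
The checks above test the qed-reported capability bitmap against per-speed link-mode masks. A hedged sketch of that pattern in kernel context; the exact link-mode bits the driver groups for each speed are not visible in this listing, so the two bits below are illustrative:

/* Sketch (kernel context): translate ethtool link-mode capability bits
 * into an FC_PORTSPEED flag.  The bits grouped here are examples only.
 */
__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);

linkmode_zero(sup_caps);
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, sup_caps);
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, sup_caps);

if (linkmode_intersects(link->supported_caps, sup_caps))
	lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
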
548 qed_ops->common->get_link(qedf->cdev, &link); in qedf_bw_update()
550 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_bw_update()
551 QEDF_ERR(&qedf->dbg_ctx, in qedf_bw_update()
557 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) in qedf_bw_update()
560 QEDF_ERR(&qedf->dbg_ctx, in qedf_bw_update()
564 QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n"); in qedf_bw_update()
576 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_link_update()
577 QEDF_ERR(&qedf->dbg_ctx, in qedf_link_update()
582 if (link->link_up) { in qedf_link_update()
583 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { in qedf_link_update()
584 QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_update()
588 QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d Gb/s).\n", in qedf_link_update()
589 link->speed / 1000); in qedf_link_update()
592 cancel_delayed_work(&qedf->link_update); in qedf_link_update()
594 atomic_set(&qedf->link_state, QEDF_LINK_UP); in qedf_link_update()
597 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE || in qedf_link_update()
599 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_update()
601 if (atomic_read(&qedf->link_down_tmo_valid) > 0) in qedf_link_update()
602 queue_delayed_work(qedf->link_update_wq, in qedf_link_update()
603 &qedf->link_recovery, 0); in qedf_link_update()
605 queue_delayed_work(qedf->link_update_wq, in qedf_link_update()
606 &qedf->link_update, 0); in qedf_link_update()
607 atomic_set(&qedf->link_down_tmo_valid, 0); in qedf_link_update()
611 QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n"); in qedf_link_update()
613 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); in qedf_link_update()
614 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); in qedf_link_update()
620 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_update()
622 atomic_set(&qedf->link_down_tmo_valid, 1); in qedf_link_update()
624 qedf->vlan_id = 0; in qedf_link_update()
626 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, in qedf_link_update()
637 QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe " in qedf_dcbx_handler()
638 "prio=%d.\n", get->operational.valid, get->operational.enabled, in qedf_dcbx_handler()
639 get->operational.app_prio.fcoe); in qedf_dcbx_handler()
641 if (get->operational.enabled && get->operational.valid) { in qedf_dcbx_handler()
643 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { in qedf_dcbx_handler()
644 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_dcbx_handler()
649 atomic_set(&qedf->dcbx, QEDF_DCBX_DONE); in qedf_dcbx_handler()
658 tmp_prio = get->operational.app_prio.fcoe; in qedf_dcbx_handler()
659 if (qedf_default_prio > -1) in qedf_dcbx_handler()
660 qedf->prio = qedf_default_prio; in qedf_dcbx_handler()
662 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_dcbx_handler()
665 qedf->prio = QEDF_DEFAULT_PRIO; in qedf_dcbx_handler()
667 qedf->prio = tmp_prio; in qedf_dcbx_handler()
669 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP && in qedf_dcbx_handler()
671 if (atomic_read(&qedf->link_down_tmo_valid) > 0) in qedf_dcbx_handler()
672 queue_delayed_work(qedf->link_update_wq, in qedf_dcbx_handler()
673 &qedf->link_recovery, 0); in qedf_dcbx_handler()
675 queue_delayed_work(qedf->link_update_wq, in qedf_dcbx_handler()
676 &qedf->link_update, 0); in qedf_dcbx_handler()
677 atomic_set(&qedf->link_down_tmo_valid, 0); in qedf_dcbx_handler()
688 return qedf->flogi_failed; in qedf_get_login_failures()
715 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); in qedf_eh_abort()
719 struct fc_rport_libfc_priv *rp = rport->dd_data; in qedf_eh_abort()
728 lport = shost_priv(sc_cmd->device->host); in qedf_eh_abort()
731 /* rport and tgt are allocated together, so tgt should be non-NULL */ in qedf_eh_abort()
733 rdata = fcport->rdata; in qedf_eh_abort()
734 if (!rdata || !kref_get_unless_zero(&rdata->kref)) { in qedf_eh_abort()
735 QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); in qedf_eh_abort()
741 io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; in qedf_eh_abort()
743 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
745 sc_cmd, sc_cmd->cmnd[0], in qedf_eh_abort()
746 rdata->ids.port_id); in qedf_eh_abort()
751 rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */ in qedf_eh_abort()
756 if (!rval || io_req->sc_cmd != sc_cmd) { in qedf_eh_abort()
757 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
758 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n", in qedf_eh_abort()
759 io_req->sc_cmd, sc_cmd, rdata->ids.port_id); in qedf_eh_abort()
765 refcount = kref_read(&io_req->refcount); in qedf_eh_abort()
766 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
768 io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0], in qedf_eh_abort()
769 refcount, rdata->ids.port_id); in qedf_eh_abort()
778 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_eh_abort()
779 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
781 io_req->xid, rdata->ids.port_id); in qedf_eh_abort()
782 while (io_req->sc_cmd && (wait_count != 0)) { in qedf_eh_abort()
784 wait_count--; in qedf_eh_abort()
787 QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n"); in qedf_eh_abort()
790 QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n"); in qedf_eh_abort()
796 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { in qedf_eh_abort()
797 QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n"); in qedf_eh_abort()
801 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
803 io_req, sc_cmd, io_req->xid, io_req->fp_idx, in qedf_eh_abort()
804 rdata->ids.port_id); in qedf_eh_abort()
806 if (qedf->stop_io_on_error) { in qedf_eh_abort()
812 init_completion(&io_req->abts_done); in qedf_eh_abort()
815 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); in qedf_eh_abort()
825 wait_for_completion(&io_req->abts_done); in qedf_eh_abort()
827 if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || in qedf_eh_abort()
828 io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || in qedf_eh_abort()
829 io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { in qedf_eh_abort()
842 QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n", in qedf_eh_abort()
843 io_req->xid); in qedf_eh_abort()
845 QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", in qedf_eh_abort()
846 io_req->xid); in qedf_eh_abort()
849 kref_put(&rdata->kref, fc_rport_destroy); in qedf_eh_abort()
852 kref_put(&io_req->refcount, qedf_release_cmd); in qedf_eh_abort()
859 sc_cmd->device->host->host_no, sc_cmd->device->id, in qedf_eh_target_reset()
860 sc_cmd->device->lun); in qedf_eh_target_reset()
867 sc_cmd->device->host->host_no, sc_cmd->device->id, in qedf_eh_device_reset()
868 sc_cmd->device->lun); in qedf_eh_device_reset()
877 while (wait_cnt--) { in qedf_wait_for_upload()
878 if (atomic_read(&qedf->num_offloads)) in qedf_wait_for_upload()
879 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_wait_for_upload()
881 atomic_read(&qedf->num_offloads)); in qedf_wait_for_upload()
888 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { in qedf_wait_for_upload()
890 &fcport->flags)) { in qedf_wait_for_upload()
891 if (fcport->rdata) in qedf_wait_for_upload()
892 QEDF_ERR(&qedf->dbg_ctx, in qedf_wait_for_upload()
894 fcport, fcport->rdata->ids.port_id); in qedf_wait_for_upload()
896 QEDF_ERR(&qedf->dbg_ctx, in qedf_wait_for_upload()
911 if (lport->vport) { in qedf_ctx_soft_reset()
918 qedf->flogi_pending = 0; in qedf_ctx_soft_reset()
920 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); in qedf_ctx_soft_reset()
921 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_ctx_soft_reset()
923 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, in qedf_ctx_soft_reset()
927 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); in qedf_ctx_soft_reset()
928 WARN_ON(atomic_read(&qedf->num_offloads)); in qedf_ctx_soft_reset()
932 qed_ops->common->get_link(qedf->cdev, &if_link); in qedf_ctx_soft_reset()
935 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_ctx_soft_reset()
940 flush_delayed_work(&qedf->link_update); in qedf_ctx_soft_reset()
943 atomic_set(&qedf->link_state, QEDF_LINK_UP); in qedf_ctx_soft_reset()
944 qedf->vlan_id = 0; in qedf_ctx_soft_reset()
945 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_ctx_soft_reset()
947 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, in qedf_ctx_soft_reset()
957 lport = shost_priv(sc_cmd->device->host); in qedf_eh_host_reset()
960 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || in qedf_eh_host_reset()
961 test_bit(QEDF_UNLOADING, &qedf->flags)) in qedf_eh_host_reset()
964 QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); in qedf_eh_host_reset()
983 .this_id = -1,
1016 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { in qedf_fcport_lookup()
1017 rdata = fcport->rdata; in qedf_fcport_lookup()
1020 if (rdata->ids.port_id == port_id) { in qedf_fcport_lookup()
1038 if ((fh->fh_type == FC_TYPE_ELS) && in qedf_xmit_l2_frame()
1039 (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { in qedf_xmit_l2_frame()
1052 * qedf_xmit - qedf FCoE frame transmit function
1080 if (lport->vport) in qedf_xmit()
1081 base_lport = shost_priv(vport_to_shost(lport->vport)); in qedf_xmit()
1086 if (base_lport->port_id == ntoh24(fh->fh_d_id)) { in qedf_xmit()
1092 list_for_each_entry(tmp_lport, &base_lport->vports, list) { in qedf_xmit()
1093 if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) { in qedf_xmit()
1099 if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { in qedf_xmit()
1102 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_xmit()
1103 "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); in qedf_xmit()
1105 rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); in qedf_xmit()
1107 rdata->retries = lport->max_rport_retry_count; in qedf_xmit()
1108 kref_put(&rdata->kref, fc_rport_destroy); in qedf_xmit()
1110 return -EINVAL; in qedf_xmit()
1114 if (!qedf->ctlr.sel_fcf) { in qedf_xmit()
1119 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { in qedf_xmit()
1120 QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); in qedf_xmit()
1125 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { in qedf_xmit()
1126 QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n"); in qedf_xmit()
1131 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { in qedf_xmit()
1132 if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) in qedf_xmit()
1137 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); in qedf_xmit()
1139 if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_xmit()
1155 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; in qedf_xmit()
1157 skb->ip_summed = CHECKSUM_NONE; in qedf_xmit()
1166 return -ENOMEM; in qedf_xmit()
1168 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; in qedf_xmit()
1175 cp->fcoe_eof = eof; in qedf_xmit()
1176 cp->fcoe_crc32 = cpu_to_le32(~crc); in qedf_xmit()
1187 skb->mac_len = elen; in qedf_xmit()
1188 skb->protocol = htons(ETH_P_FCOE); in qedf_xmit()
1191 * Add VLAN tag to non-offload FCoE frame based on current stored VLAN in qedf_xmit()
1194 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); in qedf_xmit()
1198 eh->h_proto = htons(ETH_P_FCOE); in qedf_xmit()
1199 if (qedf->ctlr.map_dest) in qedf_xmit()
1200 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); in qedf_xmit()
1203 ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); in qedf_xmit()
1206 ether_addr_copy(eh->h_source, qedf->data_src_addr); in qedf_xmit()
1212 hp->fcoe_sof = sof; in qedf_xmit()
1215 stats = per_cpu_ptr(lport->stats, get_cpu()); in qedf_xmit()
1216 stats->TxFrames++; in qedf_xmit()
1217 stats->TxWords += wlen; in qedf_xmit()
1225 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: " in qedf_xmit()
1227 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, in qedf_xmit()
1231 1, skb->data, skb->len, false); in qedf_xmit()
1232 rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); in qedf_xmit()
1234 QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); in qedf_xmit()
1250 fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); in qedf_alloc_sq()
1251 fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE); in qedf_alloc_sq()
1252 fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) * in qedf_alloc_sq()
1254 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; in qedf_alloc_sq()
1256 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, in qedf_alloc_sq()
1257 &fcport->sq_dma, GFP_KERNEL); in qedf_alloc_sq()
1258 if (!fcport->sq) { in qedf_alloc_sq()
1259 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); in qedf_alloc_sq()
1264 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_sq()
1265 fcport->sq_pbl_size, in qedf_alloc_sq()
1266 &fcport->sq_pbl_dma, GFP_KERNEL); in qedf_alloc_sq()
1267 if (!fcport->sq_pbl) { in qedf_alloc_sq()
1268 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); in qedf_alloc_sq()
1274 num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; in qedf_alloc_sq()
1275 page = fcport->sq_dma; in qedf_alloc_sq()
1276 pbl = (u32 *)fcport->sq_pbl; in qedf_alloc_sq()
1278 while (num_pages--) { in qedf_alloc_sq()
1289 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, in qedf_alloc_sq()
1290 fcport->sq_dma); in qedf_alloc_sq()
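
The elided loop at line 1278 fills the send-queue PBL: each QEDF_PAGE_SIZE page of the queue contributes one 64-bit DMA address, stored as two 32-bit words. A standalone sketch of that pattern with illustrative names (not the driver's):

/* Sketch: populate a page base list (PBL) with each queue page's DMA
 * address split into low/high 32-bit halves.  PAGE_SZ is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u

static void fill_pbl(uint32_t *pbl, uint64_t first_page_dma, unsigned int num_pages)
{
	uint64_t page = first_page_dma;

	while (num_pages--) {
		*pbl++ = (uint32_t)(page & 0xffffffffu);	/* low 32 bits  */
		*pbl++ = (uint32_t)(page >> 32);		/* high 32 bits */
		page += PAGE_SZ;
	}
}

int main(void)
{
	uint32_t pbl[8];

	fill_pbl(pbl, 0x1ffff0000ULL, 4);
	printf("entry0 lo=0x%08x hi=0x%08x\n", pbl[0], pbl[1]);
	/* entry0 lo=0xffff0000 hi=0x00000001 */
	return 0;
}
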
1297 if (fcport->sq_pbl) in qedf_free_sq()
1298 dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, in qedf_free_sq()
1299 fcport->sq_pbl, fcport->sq_pbl_dma); in qedf_free_sq()
1300 if (fcport->sq) in qedf_free_sq()
1301 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, in qedf_free_sq()
1302 fcport->sq, fcport->sq_dma); in qedf_free_sq()
1311 uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); in qedf_offload_connection()
1313 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection " in qedf_offload_connection()
1314 "portid=%06x.\n", fcport->rdata->ids.port_id); in qedf_offload_connection()
1315 rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle, in qedf_offload_connection()
1316 &fcport->fw_cid, &fcport->p_doorbell); in qedf_offload_connection()
1318 QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection " in qedf_offload_connection()
1319 "for portid=%06x.\n", fcport->rdata->ids.port_id); in qedf_offload_connection()
1324 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x " in qedf_offload_connection()
1325 "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id, in qedf_offload_connection()
1326 fcport->fw_cid, fcport->handle); in qedf_offload_connection()
1331 conn_info.sq_pbl_addr = fcport->sq_pbl_dma; in qedf_offload_connection()
1333 conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl); in qedf_offload_connection()
1335 (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); in qedf_offload_connection()
1338 ether_addr_copy(conn_info.src_mac, qedf->data_src_addr); in qedf_offload_connection()
1340 ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); in qedf_offload_connection()
1342 conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; in qedf_offload_connection()
1343 conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov; in qedf_offload_connection()
1345 conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; in qedf_offload_connection()
1348 conn_info.vlan_tag = qedf->vlan_id << in qedf_offload_connection()
1351 qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT; in qedf_offload_connection()
1356 port_id = fc_host_port_id(qedf->lport->host); in qedf_offload_connection()
1357 fcport->sid = port_id; in qedf_offload_connection()
1362 conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq; in qedf_offload_connection()
1365 port_id = fcport->rdata->rport->port_id; in qedf_offload_connection()
1372 /* Set FC-TAPE specific flags if needed */ in qedf_offload_connection()
1373 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { in qedf_offload_connection()
1374 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, in qedf_offload_connection()
1376 fcport->rdata->ids.port_id); in qedf_offload_connection()
1380 ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << in qedf_offload_connection()
1384 rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info); in qedf_offload_connection()
1386 QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection " in qedf_offload_connection()
1387 "for portid=%06x.\n", fcport->rdata->ids.port_id); in qedf_offload_connection()
1390 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload " in qedf_offload_connection()
1392 fcport->rdata->ids.port_id, total_sqe); in qedf_offload_connection()
1394 spin_lock_init(&fcport->rport_lock); in qedf_offload_connection()
1395 atomic_set(&fcport->free_sqes, total_sqe); in qedf_offload_connection()
1398 qed_ops->release_conn(qedf->cdev, fcport->handle); in qedf_offload_connection()
1414 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, in qedf_upload_connection()
1417 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " in qedf_upload_connection()
1418 "port_id=%06x.\n", fcport->rdata->ids.port_id); in qedf_upload_connection()
1420 qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma); in qedf_upload_connection()
1421 qed_ops->release_conn(qedf->cdev, fcport->handle); in qedf_upload_connection()
1423 dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params, in qedf_upload_connection()
1430 struct fc_rport_priv *rdata = fcport->rdata; in qedf_cleanup_fcport()
1432 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", in qedf_cleanup_fcport()
1433 fcport->rdata->ids.port_id); in qedf_cleanup_fcport()
1436 qedf_flush_active_ios(fcport, -1); in qedf_cleanup_fcport()
1438 if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) in qedf_cleanup_fcport()
1441 fcport->rdata = NULL; in qedf_cleanup_fcport()
1442 fcport->qedf = NULL; in qedf_cleanup_fcport()
1443 kref_put(&rdata->kref, fc_rport_destroy); in qedf_cleanup_fcport()
1456 struct fc_rport *rport = rdata->rport; in qedf_rport_event_handler()
1463 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, " in qedf_rport_event_handler()
1464 "port_id = 0x%x\n", event, rdata->ids.port_id); in qedf_rport_event_handler()
1469 QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n"); in qedf_rport_event_handler()
1473 rp = rport->dd_data; in qedf_rport_event_handler()
1475 fcport->qedf = qedf; in qedf_rport_event_handler()
1477 if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) { in qedf_rport_event_handler()
1478 QEDF_ERR(&(qedf->dbg_ctx), "Not offloading " in qedf_rport_event_handler()
1480 "reached.\n", rdata->ids.port_id); in qedf_rport_event_handler()
1488 if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_rport_event_handler()
1489 QEDF_WARN(&(qedf->dbg_ctx), "Session already " in qedf_rport_event_handler()
1491 rdata->ids.port_id); in qedf_rport_event_handler()
1495 if (rport->port_id == FC_FID_DIR_SERV) { in qedf_rport_event_handler()
1502 QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not " in qedf_rport_event_handler()
1504 rdata->ids.port_id); in qedf_rport_event_handler()
1508 if (rdata->spp_type != FC_TYPE_FCP) { in qedf_rport_event_handler()
1509 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1513 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { in qedf_rport_event_handler()
1514 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1520 kref_get(&rdata->kref); in qedf_rport_event_handler()
1521 fcport->rdata = rdata; in qedf_rport_event_handler()
1522 fcport->rport = rport; in qedf_rport_event_handler()
1531 if (rdata->flags & FC_RP_FLAGS_RETRY && in qedf_rport_event_handler()
1532 rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && in qedf_rport_event_handler()
1533 !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { in qedf_rport_event_handler()
1534 fcport->dev_type = QEDF_RPORT_TYPE_TAPE; in qedf_rport_event_handler()
1535 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1537 rdata->ids.port_id); in qedf_rport_event_handler()
1539 fcport->dev_type = QEDF_RPORT_TYPE_DISK; in qedf_rport_event_handler()
1549 spin_lock_irqsave(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1550 list_add_rcu(&fcport->peers, &qedf->fcports); in qedf_rport_event_handler()
1551 spin_unlock_irqrestore(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1557 set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags); in qedf_rport_event_handler()
1558 atomic_inc(&qedf->num_offloads); in qedf_rport_event_handler()
1564 port_id = rdata->ids.port_id; in qedf_rport_event_handler()
1568 if (rdata->spp_type != FC_TYPE_FCP) { in qedf_rport_event_handler()
1569 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1573 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { in qedf_rport_event_handler()
1574 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1580 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1581 "port_id=%x - rport not created yet.\n", port_id); in qedf_rport_event_handler()
1584 rp = rport->dd_data; in qedf_rport_event_handler()
1586 * Perform session upload. Note that rdata->peers is already in qedf_rport_event_handler()
1587 * removed from disc->rports list before we get this event. in qedf_rport_event_handler()
1591 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_rport_event_handler()
1593 if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) && in qedf_rport_event_handler()
1595 &fcport->flags)) { in qedf_rport_event_handler()
1597 &fcport->flags); in qedf_rport_event_handler()
1598 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_rport_event_handler()
1604 spin_lock_irqsave(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1605 list_del_rcu(&fcport->peers); in qedf_rport_event_handler()
1606 spin_unlock_irqrestore(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1609 &fcport->flags); in qedf_rport_event_handler()
1610 atomic_dec(&qedf->num_offloads); in qedf_rport_event_handler()
1612 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_rport_event_handler()
1623 /* NO-OP but need to fill in the template */ in qedf_abort_io()
1629 * NO-OP but need to fill in template to prevent a NULL in qedf_fcp_cleanup()
1645 fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO); in qedf_fcoe_ctlr_setup()
1647 qedf->ctlr.send = qedf_fip_send; in qedf_fcoe_ctlr_setup()
1648 qedf->ctlr.get_src_addr = qedf_get_src_mac; in qedf_fcoe_ctlr_setup()
1649 ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); in qedf_fcoe_ctlr_setup()
1654 struct fc_lport *lport = qedf->lport; in qedf_setup_fdmi()
1663 lport->fdmi_enabled = 1; in qedf_setup_fdmi()
1670 /* Get the PCI-e Device Serial Number Capability */ in qedf_setup_fdmi()
1671 pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN); in qedf_setup_fdmi()
1675 pci_read_config_byte(qedf->pdev, pos + i, &buf[i]); in qedf_setup_fdmi()
1677 snprintf(fc_host_serial_number(lport->host), in qedf_setup_fdmi()
1683 snprintf(fc_host_serial_number(lport->host), in qedf_setup_fdmi()
1686 snprintf(fc_host_manufacturer(lport->host), in qedf_setup_fdmi()
1689 if (qedf->pdev->device == QL45xxx) { in qedf_setup_fdmi()
1690 snprintf(fc_host_model(lport->host), in qedf_setup_fdmi()
1693 snprintf(fc_host_model_description(lport->host), in qedf_setup_fdmi()
1698 if (qedf->pdev->device == QL41xxx) { in qedf_setup_fdmi()
1699 snprintf(fc_host_model(lport->host), in qedf_setup_fdmi()
1702 snprintf(fc_host_model_description(lport->host), in qedf_setup_fdmi()
1707 snprintf(fc_host_hardware_version(lport->host), in qedf_setup_fdmi()
1708 FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision); in qedf_setup_fdmi()
1710 snprintf(fc_host_driver_version(lport->host), in qedf_setup_fdmi()
1713 snprintf(fc_host_firmware_version(lport->host), in qedf_setup_fdmi()
1722 struct fc_lport *lport = qedf->lport; in qedf_lport_setup()
1724 lport->link_up = 0; in qedf_lport_setup()
1725 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; in qedf_lport_setup()
1726 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; in qedf_lport_setup()
1727 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | in qedf_lport_setup()
1729 lport->boot_time = jiffies; in qedf_lport_setup()
1730 lport->e_d_tov = 2 * 1000; in qedf_lport_setup()
1731 lport->r_a_tov = 10 * 1000; in qedf_lport_setup()
1734 lport->does_npiv = 1; in qedf_lport_setup()
1735 fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV; in qedf_lport_setup()
1737 fc_set_wwnn(lport, qedf->wwnn); in qedf_lport_setup()
1738 fc_set_wwpn(lport, qedf->wwpn); in qedf_lport_setup()
1740 if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { in qedf_lport_setup()
1741 QEDF_ERR(&qedf->dbg_ctx, in qedf_lport_setup()
1743 return -ENOMEM; in qedf_lport_setup()
1751 return -ENOMEM; in qedf_lport_setup()
1758 fc_host_maxframe_size(lport->host) = lport->mfs; in qedf_lport_setup()
1761 fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; in qedf_lport_setup()
1764 if (qedf->pdev->device == QL45xxx) in qedf_lport_setup()
1765 snprintf(fc_host_symbolic_name(lport->host), 256, in qedf_lport_setup()
1768 if (qedf->pdev->device == QL41xxx) in qedf_lport_setup()
1769 snprintf(fc_host_symbolic_name(lport->host), 256, in qedf_lport_setup()
1784 lport->link_up = 0; in qedf_vport_libfc_config()
1785 lport->qfull = 0; in qedf_vport_libfc_config()
1786 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; in qedf_vport_libfc_config()
1787 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; in qedf_vport_libfc_config()
1788 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | in qedf_vport_libfc_config()
1790 lport->boot_time = jiffies; in qedf_vport_libfc_config()
1791 lport->e_d_tov = 2 * 1000; in qedf_vport_libfc_config()
1792 lport->r_a_tov = 10 * 1000; in qedf_vport_libfc_config()
1793 lport->does_npiv = 1; /* Temporary until we add NPIV support */ in qedf_vport_libfc_config()
1797 return -ENOMEM; in qedf_vport_libfc_config()
1803 lport->crc_offload = 0; in qedf_vport_libfc_config()
1804 lport->seq_offload = 0; in qedf_vport_libfc_config()
1805 lport->lro_enabled = 0; in qedf_vport_libfc_config()
1806 lport->lro_xid = 0; in qedf_vport_libfc_config()
1807 lport->lso_max = 0; in qedf_vport_libfc_config()
1825 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); in qedf_vport_create()
1826 QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " in qedf_vport_create()
1831 if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { in qedf_vport_create()
1832 QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " in qedf_vport_create()
1834 rc = -EIO; in qedf_vport_create()
1840 QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " in qedf_vport_create()
1842 rc = -ENOMEM; in qedf_vport_create()
1846 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); in qedf_vport_create()
1847 QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n", in qedf_vport_create()
1855 vport_qedf->lport = vn_port; in qedf_vport_create()
1857 vport_qedf->hba_lock = base_qedf->hba_lock; in qedf_vport_create()
1858 vport_qedf->pdev = base_qedf->pdev; in qedf_vport_create()
1859 vport_qedf->cmd_mgr = base_qedf->cmd_mgr; in qedf_vport_create()
1860 init_completion(&vport_qedf->flogi_compl); in qedf_vport_create()
1861 INIT_LIST_HEAD(&vport_qedf->fcports); in qedf_vport_create()
1865 QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " in qedf_vport_create()
1870 fc_set_wwnn(vn_port, vport->node_name); in qedf_vport_create()
1871 fc_set_wwpn(vn_port, vport->port_name); in qedf_vport_create()
1872 vport_qedf->wwnn = vn_port->wwnn; in qedf_vport_create()
1873 vport_qedf->wwpn = vn_port->wwpn; in qedf_vport_create()
1875 vn_port->host->transportt = qedf_fc_vport_transport_template; in qedf_vport_create()
1876 vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS; in qedf_vport_create()
1877 vn_port->host->max_lun = qedf_max_lun; in qedf_vport_create()
1878 vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; in qedf_vport_create()
1879 vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; in qedf_vport_create()
1881 rc = scsi_add_host(vn_port->host, &vport->dev); in qedf_vport_create()
1883 QEDF_WARN(&base_qedf->dbg_ctx, in qedf_vport_create()
1889 fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo; in qedf_vport_create()
1892 memcpy(&vn_port->tt, &qedf_lport_template, in qedf_vport_create()
1909 fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN; in qedf_vport_create()
1914 vn_port->boot_time = jiffies; in qedf_vport_create()
1919 QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", in qedf_vport_create()
1923 vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; in qedf_vport_create()
1924 vport_qedf->dbg_ctx.pdev = base_qedf->pdev; in qedf_vport_create()
1927 scsi_host_put(vn_port->host); in qedf_vport_create()
1936 struct fc_lport *vn_port = vport->dd_data; in qedf_vport_destroy()
1945 set_bit(QEDF_UNLOADING, &qedf->flags); in qedf_vport_destroy()
1947 mutex_lock(&n_port->lp_mutex); in qedf_vport_destroy()
1948 list_del(&vn_port->list); in qedf_vport_destroy()
1949 mutex_unlock(&n_port->lp_mutex); in qedf_vport_destroy()
1954 /* Detach from scsi-ml */ in qedf_vport_destroy()
1955 fc_remove_host(vn_port->host); in qedf_vport_destroy()
1956 scsi_remove_host(vn_port->host); in qedf_vport_destroy()
1962 if (vn_port->state == LPORT_ST_READY) in qedf_vport_destroy()
1969 if (vn_port->host) in qedf_vport_destroy()
1970 scsi_host_put(vn_port->host); in qedf_vport_destroy()
1978 struct fc_lport *lport = vport->dd_data; in qedf_vport_disable()
1984 lport->boot_time = jiffies; in qedf_vport_disable()
1999 struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host); in qedf_wait_for_vport_destroy()
2001 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, in qedf_wait_for_vport_destroy()
2003 while (fc_host->npiv_vports_inuse > 0) { in qedf_wait_for_vport_destroy()
2004 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, in qedf_wait_for_vport_destroy()
2011 * qedf_fcoe_reset - Resets the fcoe
2029 fc_host_port_id(shost) = lport->port_id; in qedf_get_host_port_id()
2043 if (lport->vport) in qedf_fc_get_host_stats()
2048 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " in qedf_fc_get_host_stats()
2053 mutex_lock(&qedf->stats_mutex); in qedf_fc_get_host_stats()
2056 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); in qedf_fc_get_host_stats()
2064 qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt + in qedf_fc_get_host_stats()
2065 fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt + in qedf_fc_get_host_stats()
2066 fw_fcoe_stats->fcoe_tx_other_pkt_cnt; in qedf_fc_get_host_stats()
2067 qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt + in qedf_fc_get_host_stats()
2068 fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt + in qedf_fc_get_host_stats()
2069 fw_fcoe_stats->fcoe_rx_other_pkt_cnt; in qedf_fc_get_host_stats()
2070 qedf_stats->fcp_input_megabytes += in qedf_fc_get_host_stats()
2071 do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000); in qedf_fc_get_host_stats()
2072 qedf_stats->fcp_output_megabytes += in qedf_fc_get_host_stats()
2073 do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000); in qedf_fc_get_host_stats()
2074 qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4; in qedf_fc_get_host_stats()
2075 qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4; in qedf_fc_get_host_stats()
2076 qedf_stats->invalid_crc_count += in qedf_fc_get_host_stats()
2077 fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt; in qedf_fc_get_host_stats()
2078 qedf_stats->dumped_frames = in qedf_fc_get_host_stats()
2079 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; in qedf_fc_get_host_stats()
2080 qedf_stats->error_frames += in qedf_fc_get_host_stats()
2081 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; in qedf_fc_get_host_stats()
2082 qedf_stats->fcp_input_requests += qedf->input_requests; in qedf_fc_get_host_stats()
2083 qedf_stats->fcp_output_requests += qedf->output_requests; in qedf_fc_get_host_stats()
2084 qedf_stats->fcp_control_requests += qedf->control_requests; in qedf_fc_get_host_stats()
2085 qedf_stats->fcp_packet_aborts += qedf->packet_aborts; in qedf_fc_get_host_stats()
2086 qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; in qedf_fc_get_host_stats()
2088 mutex_unlock(&qedf->stats_mutex); in qedf_fc_get_host_stats()
2166 struct qedf_ctx *qedf = fp->qedf; in qedf_fp_has_work()
2168 struct qed_sb_info *sb_info = fp->sb_info; in qedf_fp_has_work()
2169 struct status_block_e4 *sb = sb_info->sb_virt; in qedf_fp_has_work()
2173 que = qedf->global_queues[fp->sb_id]; in qedf_fp_has_work()
2179 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; in qedf_fp_has_work()
2181 return (que->cq_prod_idx != prod_idx); in qedf_fp_has_work()
2188 /* Process completion queue and copy CQE contents for deferred processing
2194 struct qedf_ctx *qedf = fp->qedf; in qedf_process_completions()
2195 struct qed_sb_info *sb_info = fp->sb_info; in qedf_process_completions()
2196 struct status_block_e4 *sb = sb_info->sb_virt; in qedf_process_completions()
2199 struct fcoe_cqe *cqe; in qedf_process_completions() local
2209 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; in qedf_process_completions()
2212 que = qedf->global_queues[fp->sb_id]; in qedf_process_completions()
2215 new_cqes = (prod_idx >= que->cq_prod_idx) ? in qedf_process_completions()
2216 (prod_idx - que->cq_prod_idx) : in qedf_process_completions()
2217 0x10000 - que->cq_prod_idx + prod_idx; in qedf_process_completions()
2220 que->cq_prod_idx = prod_idx; in qedf_process_completions()
2223 fp->completions++; in qedf_process_completions()
2225 cqe = &que->cq[que->cq_cons_idx]; in qedf_process_completions()
2227 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & in qedf_process_completions()
2235 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, in qedf_process_completions()
2236 "Unsolicited CQE.\n"); in qedf_process_completions()
2237 qedf_process_unsol_compl(qedf, fp->sb_id, cqe); in qedf_process_completions()
2245 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; in qedf_process_completions()
2246 io_req = &qedf->cmd_mgr->cmds[xid]; in qedf_process_completions()
2253 /* If there is no io_req associated with this CQE in qedf_process_completions()
2258 cpu = io_req->cpu; in qedf_process_completions()
2259 io_req->int_cpu = smp_processor_id(); in qedf_process_completions()
2262 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); in qedf_process_completions()
2264 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " in qedf_process_completions()
2270 INIT_WORK(&io_work->work, qedf_fp_io_handler); in qedf_process_completions()
2272 /* Copy contents of CQE for deferred processing */ in qedf_process_completions()
2273 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); in qedf_process_completions()
2275 io_work->qedf = fp->qedf; in qedf_process_completions()
2276 io_work->fp = NULL; /* Only used for unsolicited frames */ in qedf_process_completions()
2278 queue_work_on(cpu, qedf_io_wq, &io_work->work); in qedf_process_completions()
2281 que->cq_cons_idx++; in qedf_process_completions()
2282 if (que->cq_cons_idx == fp->cq_num_entries) in qedf_process_completions()
2283 que->cq_cons_idx = 0; in qedf_process_completions()
2284 new_cqes--; in qedf_process_completions()
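
The new_cqes computation at lines 2215-2217 accounts for the 16-bit producer index wrapping past 0xffff. A standalone sketch of the same arithmetic:

/* Sketch: number of new CQEs between a consumer index and a 16-bit
 * producer index that may have wrapped.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t new_cqes(uint16_t prod_idx, uint16_t cons_idx)
{
	return (prod_idx >= cons_idx) ? (uint16_t)(prod_idx - cons_idx)
				      : (uint16_t)(0x10000 - cons_idx + prod_idx);
}

int main(void)
{
	printf("%u\n", new_cqes(10, 5));	/* 5 */
	printf("%u\n", new_cqes(3, 0xfffe));	/* 5: producer wrapped past 0xffff */
	return 0;
}
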
2291 /* MSI-X fastpath handler code */
2300 if (!fp->sb_info) { in qedf_msix_handler()
2301 QEDF_ERR(NULL, "fp->sb_info is null."); in qedf_msix_handler()
2309 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); in qedf_msix_handler()
2316 qed_sb_update_sb_idx(fp->sb_info); in qedf_msix_handler()
2322 /* Re-enable interrupts */ in qedf_msix_handler()
2323 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); in qedf_msix_handler()
2339 QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf); in qedf_simd_int_handler()
2349 if (qedf->int_info.msix_cnt) { in qedf_sync_free_irqs()
2350 for (i = 0; i < qedf->int_info.used_cnt; i++) { in qedf_sync_free_irqs()
2351 vector_idx = i * qedf->dev_info.common.num_hwfns + in qedf_sync_free_irqs()
2352 qed_ops->common->get_affin_hwfn_idx(qedf->cdev); in qedf_sync_free_irqs()
2353 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_sync_free_irqs()
2356 vector = qedf->int_info.msix[vector_idx].vector; in qedf_sync_free_irqs()
2360 free_irq(vector, &qedf->fp_array[i]); in qedf_sync_free_irqs()
2363 qed_ops->common->simd_handler_clean(qedf->cdev, in qedf_sync_free_irqs()
2366 qedf->int_info.used_cnt = 0; in qedf_sync_free_irqs()
2367 qed_ops->common->set_fp_int(qedf->cdev, 0); in qedf_sync_free_irqs()
2377 for (i = 0; i < qedf->num_queues; i++) { in qedf_request_msix_irq()
2378 vector_idx = i * qedf->dev_info.common.num_hwfns + in qedf_request_msix_irq()
2379 qed_ops->common->get_affin_hwfn_idx(qedf->cdev); in qedf_request_msix_irq()
2380 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_request_msix_irq()
2383 vector = qedf->int_info.msix[vector_idx].vector; in qedf_request_msix_irq()
2385 &qedf->fp_array[i]); in qedf_request_msix_irq()
2388 QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); in qedf_request_msix_irq()
2393 qedf->int_info.used_cnt++; in qedf_request_msix_irq()
2408 rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); in qedf_setup_int()
2412 rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); in qedf_setup_int()
2416 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of MSI-X vectors = " in qedf_setup_int()
2417 "0x%x, number of CPUs = 0x%x\n", qedf->int_info.msix_cnt, in qedf_setup_int()
2420 if (qedf->int_info.msix_cnt) in qedf_setup_int()
2423 qed_ops->common->simd_handler_config(qedf->cdev, &qedf, in qedf_setup_int()
2425 qedf->int_info.used_cnt = 1; in qedf_setup_int()
2427 QEDF_ERR(&qedf->dbg_ctx, in qedf_setup_int()
2428 "Cannot load driver due to a lack of MSI-X vectors.\n"); in qedf_setup_int()
2429 return -EINVAL; in qedf_setup_int()
2448 lport = qedf->lport; in qedf_recv_frame()
2449 if (lport == NULL || lport->state == LPORT_ST_DISABLED) { in qedf_recv_frame()
2457 mac = eth_hdr(skb)->h_source; in qedf_recv_frame()
2458 dest_mac = eth_hdr(skb)->h_dest; in qedf_recv_frame()
2461 hp = (struct fcoe_hdr *)skb->data; in qedf_recv_frame()
2464 fr_len = skb->len - sizeof(struct fcoe_crc_eof); in qedf_recv_frame()
2469 fr_sof(fp) = hp->fcoe_sof; in qedf_recv_frame()
2489 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && in qedf_recv_frame()
2490 fh->fh_type == FC_TYPE_FCP) { in qedf_recv_frame()
2495 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && in qedf_recv_frame()
2496 fh->fh_type == FC_TYPE_ELS) { in qedf_recv_frame()
2499 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { in qedf_recv_frame()
2500 /* drop non-FIP LOGO */ in qedf_recv_frame()
2508 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { in qedf_recv_frame()
2514 if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { in qedf_recv_frame()
2515 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_recv_frame()
2521 if (qedf->ctlr.state) { in qedf_recv_frame()
2522 if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { in qedf_recv_frame()
2523 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_recv_frame()
2525 mac, qedf->ctlr.dest_addr); in qedf_recv_frame()
2531 vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); in qedf_recv_frame()
2538 if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { in qedf_recv_frame()
2539 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, in qedf_recv_frame()
2540 "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", in qedf_recv_frame()
2541 lport->port_id, ntoh24(fh->fh_d_id)); in qedf_recv_frame()
2546 f_ctl = ntoh24(fh->fh_f_ctl); in qedf_recv_frame()
2547 if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && in qedf_recv_frame()
2550 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, in qedf_recv_frame()
2563 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); in qedf_recv_frame()
2566 &fcport->flags)) { in qedf_recv_frame()
2567 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_recv_frame()
2573 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: " in qedf_recv_frame()
2575 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, in qedf_recv_frame()
2576 fh->fh_type); in qedf_recv_frame()
2579 1, skb->data, skb->len, false); in qedf_recv_frame()
2587 struct qedf_ctx *qedf = skb_work->qedf; in qedf_ll2_process_skb()
2588 struct sk_buff *skb = skb_work->skb; in qedf_ll2_process_skb()
2596 eh = (struct ethhdr *)skb->data; in qedf_ll2_process_skb()
2599 if (eh->h_proto == htons(ETH_P_8021Q)) { in qedf_ll2_process_skb()
2610 if (eh->h_proto == htons(ETH_P_FIP)) { in qedf_ll2_process_skb()
2613 } else if (eh->h_proto == htons(ETH_P_FCOE)) { in qedf_ll2_process_skb()
2633 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { in qedf_ll2_rx()
2634 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, in qedf_ll2_rx()
2642 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " in qedf_ll2_rx()
2648 INIT_WORK(&skb_work->work, qedf_ll2_process_skb); in qedf_ll2_rx()
2649 skb_work->skb = skb; in qedf_ll2_rx()
2650 skb_work->qedf = qedf; in qedf_ll2_rx()
2651 queue_work(qedf->ll2_recv_wq, &skb_work->work); in qedf_ll2_rx()
2669 * Deferred part of unsolicited CQE sends in qedf_fp_io_handler()
2672 comp_type = (io_work->cqe.cqe_data >> in qedf_fp_io_handler()
2676 io_work->fp) in qedf_fp_io_handler()
2677 fc_exch_recv(io_work->qedf->lport, io_work->fp); in qedf_fp_io_handler()
2679 qedf_process_cqe(io_work->qedf, &io_work->cqe); in qedf_fp_io_handler()
2691 sb_virt = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_and_init_sb()
2695 QEDF_ERR(&qedf->dbg_ctx, in qedf_alloc_and_init_sb()
2698 return -ENOMEM; in qedf_alloc_and_init_sb()
2701 ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys, in qedf_alloc_and_init_sb()
2705 QEDF_ERR(&qedf->dbg_ctx, in qedf_alloc_and_init_sb()
2716 if (sb_info->sb_virt) in qedf_free_sb()
2717 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt), in qedf_free_sb()
2718 (void *)sb_info->sb_virt, sb_info->sb_phys); in qedf_free_sb()
2726 for (id = 0; id < qedf->num_queues; id++) { in qedf_destroy_sb()
2727 fp = &(qedf->fp_array[id]); in qedf_destroy_sb()
2728 if (fp->sb_id == QEDF_SB_ID_NULL) in qedf_destroy_sb()
2730 qedf_free_sb(qedf, fp->sb_info); in qedf_destroy_sb()
2731 kfree(fp->sb_info); in qedf_destroy_sb()
2733 kfree(qedf->fp_array); in qedf_destroy_sb()
2742 qedf->fp_array = in qedf_prepare_sb()
2743 kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath), in qedf_prepare_sb()
2746 if (!qedf->fp_array) { in qedf_prepare_sb()
2747 QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation " in qedf_prepare_sb()
2749 return -ENOMEM; in qedf_prepare_sb()
2752 for (id = 0; id < qedf->num_queues; id++) { in qedf_prepare_sb()
2753 fp = &(qedf->fp_array[id]); in qedf_prepare_sb()
2754 fp->sb_id = QEDF_SB_ID_NULL; in qedf_prepare_sb()
2755 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); in qedf_prepare_sb()
2756 if (!fp->sb_info) { in qedf_prepare_sb()
2757 QEDF_ERR(&(qedf->dbg_ctx), "SB info struct " in qedf_prepare_sb()
2761 ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id); in qedf_prepare_sb()
2763 QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and " in qedf_prepare_sb()
2767 fp->sb_id = id; in qedf_prepare_sb()
2768 fp->qedf = qedf; in qedf_prepare_sb()
2769 fp->cq_num_entries = in qedf_prepare_sb()
2770 qedf->global_queues[id]->cq_mem_size / in qedf_prepare_sb()
2777 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) in qedf_process_cqe() argument
2784 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & in qedf_process_cqe()
2787 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; in qedf_process_cqe()
2788 io_req = &qedf->cmd_mgr->cmds[xid]; in qedf_process_cqe()
2792 QEDF_ERR(&qedf->dbg_ctx, in qedf_process_cqe()
2797 fcport = io_req->fcport; in qedf_process_cqe()
2800 QEDF_ERR(&qedf->dbg_ctx, in qedf_process_cqe()
2810 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_process_cqe()
2811 QEDF_ERR(&qedf->dbg_ctx, in qedf_process_cqe()
2819 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2820 switch (io_req->cmd_type) { in qedf_process_cqe()
2822 qedf_scsi_completion(qedf, cqe, io_req); in qedf_process_cqe()
2825 qedf_process_els_compl(qedf, cqe, io_req); in qedf_process_cqe()
2828 qedf_process_tmf_compl(qedf, cqe, io_req); in qedf_process_cqe()
2831 qedf_process_seq_cleanup_compl(qedf, cqe, io_req); in qedf_process_cqe()
2836 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2837 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2838 "Error detect CQE.\n"); in qedf_process_cqe()
2839 qedf_process_error_detect(qedf, cqe, io_req); in qedf_process_cqe()
2842 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2843 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2844 "Cleanup CQE.\n"); in qedf_process_cqe()
2845 qedf_process_cleanup_compl(qedf, cqe, io_req); in qedf_process_cqe()
2848 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2849 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2850 "Abort CQE.\n"); in qedf_process_cqe()
2851 qedf_process_abts_compl(qedf, cqe, io_req); in qedf_process_cqe()
2854 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2855 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2856 "Dummy CQE.\n"); in qedf_process_cqe()
2859 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2860 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2861 "Local completion CQE.\n"); in qedf_process_cqe()
2864 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2865 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2866 "Warning CQE.\n"); in qedf_process_cqe()
2867 qedf_process_warning_compl(qedf, cqe, io_req); in qedf_process_cqe()
2870 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2871 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2872 "Max FCoE CQE.\n"); in qedf_process_cqe()
2875 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2876 "Default CQE.\n"); in qedf_process_cqe()
2885 if (qedf->bdq_pbl_list) in qedf_free_bdq()
2886 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, in qedf_free_bdq()
2887 qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma); in qedf_free_bdq()
2889 if (qedf->bdq_pbl) in qedf_free_bdq()
2890 dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, in qedf_free_bdq()
2891 qedf->bdq_pbl, qedf->bdq_pbl_dma); in qedf_free_bdq()
2894 if (qedf->bdq[i].buf_addr) { in qedf_free_bdq()
2895 dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, in qedf_free_bdq()
2896 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma); in qedf_free_bdq()
2904 struct global_queue **gl = qedf->global_queues; in qedf_free_global_queues()
2906 for (i = 0; i < qedf->num_queues; i++) { in qedf_free_global_queues()
2910 if (gl[i]->cq) in qedf_free_global_queues()
2911 dma_free_coherent(&qedf->pdev->dev, in qedf_free_global_queues()
2912 gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); in qedf_free_global_queues()
2913 if (gl[i]->cq_pbl) in qedf_free_global_queues()
2914 dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size, in qedf_free_global_queues()
2915 gl[i]->cq_pbl, gl[i]->cq_pbl_dma); in qedf_free_global_queues()
2932 qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_bdq()
2933 QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL); in qedf_alloc_bdq()
2934 if (!qedf->bdq[i].buf_addr) { in qedf_alloc_bdq()
2935 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ " in qedf_alloc_bdq()
2937 return -ENOMEM; in qedf_alloc_bdq()
2942 qedf->bdq_pbl_mem_size = in qedf_alloc_bdq()
2944 qedf->bdq_pbl_mem_size = in qedf_alloc_bdq()
2945 ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE); in qedf_alloc_bdq()
2947 qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_bdq()
2948 qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL); in qedf_alloc_bdq()
2949 if (!qedf->bdq_pbl) { in qedf_alloc_bdq()
2950 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n"); in qedf_alloc_bdq()
2951 return -ENOMEM; in qedf_alloc_bdq()
2954 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_alloc_bdq()
2956 qedf->bdq_pbl, &qedf->bdq_pbl_dma); in qedf_alloc_bdq()
2962 pbl = (struct scsi_bd *)qedf->bdq_pbl; in qedf_alloc_bdq()
2964 pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); in qedf_alloc_bdq()
2965 pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); in qedf_alloc_bdq()
2966 pbl->opaque.fcoe_opaque.hi = 0; in qedf_alloc_bdq()
2968 pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i); in qedf_alloc_bdq()
2973 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_bdq()
2975 &qedf->bdq_pbl_list_dma, in qedf_alloc_bdq()
2977 if (!qedf->bdq_pbl_list) { in qedf_alloc_bdq()
2978 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); in qedf_alloc_bdq()
2979 return -ENOMEM; in qedf_alloc_bdq()
2986 qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / in qedf_alloc_bdq()
2988 list = (u64 *)qedf->bdq_pbl_list; in qedf_alloc_bdq()
2989 page = qedf->bdq_pbl_list_dma; in qedf_alloc_bdq()
2990 for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { in qedf_alloc_bdq()
2991 *list = qedf->bdq_pbl_dma; in qedf_alloc_bdq()
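/*
 * Two-level BDQ layout: each scsi_bd entry in bdq_pbl carries the DMA
 * address of one receive buffer split into little-endian hi/lo dwords, with
 * the buffer index stored in the opaque field, presumably so unsolicited
 * completions can be matched back to their buffer. A separate single page,
 * bdq_pbl_list, then lists the PBL page(s) that are handed to the firmware.
 */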
3013 if (!qedf->num_queues) { in qedf_alloc_global_queues()
3014 QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); in qedf_alloc_global_queues()
3022 if (!qedf->p_cpuq) { in qedf_alloc_global_queues()
3024 QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); in qedf_alloc_global_queues()
3028 qedf->global_queues = kzalloc((sizeof(struct global_queue *) in qedf_alloc_global_queues()
3029 * qedf->num_queues), GFP_KERNEL); in qedf_alloc_global_queues()
3030 if (!qedf->global_queues) { in qedf_alloc_global_queues()
3031 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global " in qedf_alloc_global_queues()
3033 return -ENOMEM; in qedf_alloc_global_queues()
3035 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_alloc_global_queues()
3036 "qedf->global_queues=%p.\n", qedf->global_queues); in qedf_alloc_global_queues()
3041 QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); in qedf_alloc_global_queues()
3045 /* Allocate a CQ and an associated PBL for each MSI-X vector */ in qedf_alloc_global_queues()
3046 for (i = 0; i < qedf->num_queues; i++) { in qedf_alloc_global_queues()
3047 qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), in qedf_alloc_global_queues()
3049 if (!qedf->global_queues[i]) { in qedf_alloc_global_queues()
3050 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate " in qedf_alloc_global_queues()
3052 status = -ENOMEM; in qedf_alloc_global_queues()
3056 qedf->global_queues[i]->cq_mem_size = in qedf_alloc_global_queues()
3058 qedf->global_queues[i]->cq_mem_size = in qedf_alloc_global_queues()
3059 ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE); in qedf_alloc_global_queues()
3061 qedf->global_queues[i]->cq_pbl_size = in qedf_alloc_global_queues()
3062 (qedf->global_queues[i]->cq_mem_size / in qedf_alloc_global_queues()
3064 qedf->global_queues[i]->cq_pbl_size = in qedf_alloc_global_queues()
3065 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); in qedf_alloc_global_queues()
3067 qedf->global_queues[i]->cq = in qedf_alloc_global_queues()
3068 dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_global_queues()
3069 qedf->global_queues[i]->cq_mem_size, in qedf_alloc_global_queues()
3070 &qedf->global_queues[i]->cq_dma, in qedf_alloc_global_queues()
3073 if (!qedf->global_queues[i]->cq) { in qedf_alloc_global_queues()
3074 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); in qedf_alloc_global_queues()
3075 status = -ENOMEM; in qedf_alloc_global_queues()
3079 qedf->global_queues[i]->cq_pbl = in qedf_alloc_global_queues()
3080 dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_global_queues()
3081 qedf->global_queues[i]->cq_pbl_size, in qedf_alloc_global_queues()
3082 &qedf->global_queues[i]->cq_pbl_dma, in qedf_alloc_global_queues()
3085 if (!qedf->global_queues[i]->cq_pbl) { in qedf_alloc_global_queues()
3086 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); in qedf_alloc_global_queues()
3087 status = -ENOMEM; in qedf_alloc_global_queues()
3092 num_pages = qedf->global_queues[i]->cq_mem_size / in qedf_alloc_global_queues()
3094 page = qedf->global_queues[i]->cq_dma; in qedf_alloc_global_queues()
3095 pbl = (u32 *)qedf->global_queues[i]->cq_pbl; in qedf_alloc_global_queues()
3097 while (num_pages--) { in qedf_alloc_global_queues()
3105 qedf->global_queues[i]->cq_cons_idx = 0; in qedf_alloc_global_queues()
3108 list = (u32 *)qedf->p_cpuq; in qedf_alloc_global_queues()
3116 for (i = 0; i < qedf->num_queues; i++) { in qedf_alloc_global_queues()
3117 *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma); in qedf_alloc_global_queues()
3119 *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma); in qedf_alloc_global_queues()
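/*
 * The qed global-queue parameter block (p_cpuq) is filled with the CQ PBL
 * base address of every queue, written as lo/hi 32-bit halves, presumably so
 * the qed core can pass the CQ layout on to the firmware.
 */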
3149 qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); in qedf_set_fcoe_pf_param()
3151 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", in qedf_set_fcoe_pf_param()
3152 qedf->num_queues); in qedf_set_fcoe_pf_param()
3154 qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev, in qedf_set_fcoe_pf_param()
3155 qedf->num_queues * sizeof(struct qedf_glbl_q_params), in qedf_set_fcoe_pf_param()
3156 &qedf->hw_p_cpuq, GFP_KERNEL); in qedf_set_fcoe_pf_param()
3158 if (!qedf->p_cpuq) { in qedf_set_fcoe_pf_param()
3159 QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n"); in qedf_set_fcoe_pf_param()
3165 QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation " in qedf_set_fcoe_pf_param()
3180 memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params)); in qedf_set_fcoe_pf_param()
3183 qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; in qedf_set_fcoe_pf_param()
3184 qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS; in qedf_set_fcoe_pf_param()
3185 qedf->pf_params.fcoe_pf_params.glbl_q_params_addr = in qedf_set_fcoe_pf_param()
3186 (u64)qedf->hw_p_cpuq; in qedf_set_fcoe_pf_param()
3187 qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages; in qedf_set_fcoe_pf_param()
3189 qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0; in qedf_set_fcoe_pf_param()
3191 qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries; in qedf_set_fcoe_pf_param()
3192 qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues; in qedf_set_fcoe_pf_param()
3195 qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE); in qedf_set_fcoe_pf_param()
3197 qedf->pf_params.fcoe_pf_params.mtu = 9000; in qedf_set_fcoe_pf_param()
3198 qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI; in qedf_set_fcoe_pf_param()
3199 qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI; in qedf_set_fcoe_pf_param()
3202 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] = in qedf_set_fcoe_pf_param()
3203 qedf->bdq_pbl_list_dma; in qedf_set_fcoe_pf_param()
3204 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] = in qedf_set_fcoe_pf_param()
3205 qedf->bdq_pbl_list_num_entries; in qedf_set_fcoe_pf_param()
3206 qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE; in qedf_set_fcoe_pf_param()
3208 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_set_fcoe_pf_param()
3210 qedf->bdq_pbl_list, in qedf_set_fcoe_pf_param()
3211 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0], in qedf_set_fcoe_pf_param()
3212 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]); in qedf_set_fcoe_pf_param()
3214 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_set_fcoe_pf_param()
3216 qedf->pf_params.fcoe_pf_params.cq_num_entries); in qedf_set_fcoe_pf_param()
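/*
 * Summary of the PF parameters handed to qed above: connection and task
 * counts, the DMA address of the global-queue parameter array, CQ count and
 * entries per CQ, log2 page size, an FCoE MTU of 9000, the BDQ PBL base
 * address and entry count, and the BDQ buffer size.
 */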
3226 if (qedf->p_cpuq) { in qedf_free_fcoe_pf_param()
3227 size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); in qedf_free_fcoe_pf_param()
3228 dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq, in qedf_free_fcoe_pf_param()
3229 qedf->hw_p_cpuq); in qedf_free_fcoe_pf_param()
3234 kfree(qedf->global_queues); in qedf_free_fcoe_pf_param()
3258 int rc = -EINVAL; in __qedf_probe()
3286 rc = -ENOMEM; in __qedf_probe()
3294 set_bit(QEDF_PROBING, &qedf->flags); in __qedf_probe()
3295 qedf->lport = lport; in __qedf_probe()
3296 qedf->ctlr.lp = lport; in __qedf_probe()
3297 qedf->pdev = pdev; in __qedf_probe()
3298 qedf->dbg_ctx.pdev = pdev; in __qedf_probe()
3299 qedf->dbg_ctx.host_no = lport->host->host_no; in __qedf_probe()
3300 spin_lock_init(&qedf->hba_lock); in __qedf_probe()
3301 INIT_LIST_HEAD(&qedf->fcports); in __qedf_probe()
3302 qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1; in __qedf_probe()
3303 atomic_set(&qedf->num_offloads, 0); in __qedf_probe()
3304 qedf->stop_io_on_error = false; in __qedf_probe()
3306 init_completion(&qedf->fipvlan_compl); in __qedf_probe()
3307 mutex_init(&qedf->stats_mutex); in __qedf_probe()
3308 mutex_init(&qedf->flush_mutex); in __qedf_probe()
3309 qedf->flogi_pending = 0; in __qedf_probe()
3311 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, in __qedf_probe()
3319 set_bit(QEDF_PROBING, &qedf->flags); in __qedf_probe()
3320 lport = qedf->lport; in __qedf_probe()
3323 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); in __qedf_probe()
3325 host = lport->host; in __qedf_probe()
3328 qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN, in __qedf_probe()
3330 if (qedf->io_mempool == NULL) { in __qedf_probe()
3331 QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n"); in __qedf_probe()
3334 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", in __qedf_probe()
3335 qedf->io_mempool); in __qedf_probe()
3338 qedf->lport->host->host_no); in __qedf_probe()
3339 qedf->link_update_wq = create_workqueue(host_buf); in __qedf_probe()
3340 INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); in __qedf_probe()
3341 INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); in __qedf_probe()
3342 INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); in __qedf_probe()
3343 INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work); in __qedf_probe()
3344 qedf->fipvlan_retries = qedf_fipvlan_retries; in __qedf_probe()
3346 if (qedf_default_prio > -1) { in __qedf_probe()
3351 qedf->prio = qedf_default_prio; in __qedf_probe()
3353 qedf->prio = QEDF_DEFAULT_PRIO; in __qedf_probe()
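/* A non-negative qedf_default_prio module parameter overrides the compiled-in QEDF_DEFAULT_PRIO. */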
3364 qedf->cdev = qed_ops->common->probe(pdev, &qed_params); in __qedf_probe()
3365 if (!qedf->cdev) { in __qedf_probe()
3367 QEDF_ERR(&qedf->dbg_ctx, in __qedf_probe()
3369 retry_cnt--; in __qedf_probe()
3372 QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); in __qedf_probe()
3373 rc = -ENODEV; in __qedf_probe()
3378 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); in __qedf_probe()
3380 QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n"); in __qedf_probe()
3384 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in __qedf_probe()
3386 qedf->dev_info.common.num_hwfns, in __qedf_probe()
3387 qed_ops->common->get_affin_hwfn_idx(qedf->cdev)); in __qedf_probe()
3399 QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n"); in __qedf_probe()
3402 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); in __qedf_probe()
3405 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); in __qedf_probe()
3407 QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n"); in __qedf_probe()
3412 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; in __qedf_probe()
3413 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; in __qedf_probe()
3414 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3415 "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod, in __qedf_probe()
3416 qedf->bdq_secondary_prod); in __qedf_probe()
3418 qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf); in __qedf_probe()
3423 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); in __qedf_probe()
3427 /* Start the Slowpath-process */ in __qedf_probe()
3434 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); in __qedf_probe()
3436 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); in __qedf_probe()
3444 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); in __qedf_probe()
3449 QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); in __qedf_probe()
3453 rc = qed_ops->start(qedf->cdev, &qedf->tasks); in __qedf_probe()
3455 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n"); in __qedf_probe()
3458 task_start = qedf_get_task_mem(&qedf->tasks, 0); in __qedf_probe()
3459 task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1); in __qedf_probe()
3460 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, " in __qedf_probe()
3462 qedf->tasks.size); in __qedf_probe()
3466 * the f/w will do a prefetch and we'll get an unsolicited CQE when a in __qedf_probe()
3469 qedf->bdq_prod_idx = QEDF_BDQ_SIZE; in __qedf_probe()
3470 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3472 qedf->bdq_prod_idx); in __qedf_probe()
3473 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); in __qedf_probe()
3474 readw(qedf->bdq_primary_prod); in __qedf_probe()
3475 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); in __qedf_probe()
3476 readw(qedf->bdq_secondary_prod); in __qedf_probe()
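/*
 * Prime both BDQ producer registers with the full ring size (QEDF_BDQ_SIZE)
 * so the firmware sees every receive buffer as posted; the readw() calls
 * after each writew() presumably act as read-backs that flush the posted
 * doorbell writes before the probe continues.
 */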
3478 qed_ops->common->set_power_state(qedf->cdev, PCI_D0); in __qedf_probe()
3483 ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac); in __qedf_probe()
3484 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", in __qedf_probe()
3485 qedf->mac); in __qedf_probe()
3490 * If the info we get from qed is non-zero then use that to set the in __qedf_probe()
3494 if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) { in __qedf_probe()
3495 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3497 qedf->wwnn = qedf->dev_info.wwnn; in __qedf_probe()
3498 qedf->wwpn = qedf->dev_info.wwpn; in __qedf_probe()
3500 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3502 qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); in __qedf_probe()
3503 qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); in __qedf_probe()
3505 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " in __qedf_probe()
3506 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); in __qedf_probe()
3508 sprintf(host_buf, "host_%d", host->host_no); in __qedf_probe()
3509 qed_ops->common->set_name(qedf->cdev, host_buf); in __qedf_probe()
3512 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); in __qedf_probe()
3513 if (!qedf->cmd_mgr) { in __qedf_probe()
3514 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); in __qedf_probe()
3515 rc = -ENOMEM; in __qedf_probe()
3520 host->transportt = qedf_fc_transport_template; in __qedf_probe()
3521 host->max_lun = qedf_max_lun; in __qedf_probe()
3522 host->max_cmd_len = QEDF_MAX_CDB_LEN; in __qedf_probe()
3523 host->can_queue = FCOE_PARAMS_NUM_TASKS; in __qedf_probe()
3524 rc = scsi_add_host(host, &pdev->dev); in __qedf_probe()
3526 QEDF_WARN(&qedf->dbg_ctx, in __qedf_probe()
3534 ether_addr_copy(params.ll2_mac_address, qedf->mac); in __qedf_probe()
3537 snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); in __qedf_probe()
3538 qedf->ll2_recv_wq = in __qedf_probe()
3540 if (!qedf->ll2_recv_wq) { in __qedf_probe()
3541 QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n"); in __qedf_probe()
3542 rc = -ENOMEM; in __qedf_probe()
3547 qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops, in __qedf_probe()
3552 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf); in __qedf_probe()
3553 rc = qed_ops->ll2->start(qedf->cdev, &params); in __qedf_probe()
3555 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n"); in __qedf_probe()
3558 set_bit(QEDF_LL2_STARTED, &qedf->flags); in __qedf_probe()
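/*
 * With qedf_ll2_cb_ops registered and the light L2 (LL2) connection started,
 * the driver has a raw Ethernet path, which it appears to use for FIP and
 * FCoE frame transmission and reception.
 */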
3561 qedf->vlan_id = 0; in __qedf_probe()
3574 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_probe()
3580 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); in __qedf_probe()
3581 qedf->timer_work_queue = in __qedf_probe()
3583 if (!qedf->timer_work_queue) { in __qedf_probe()
3584 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " in __qedf_probe()
3586 rc = -ENOMEM; in __qedf_probe()
3593 qedf->lport->host->host_no); in __qedf_probe()
3594 qedf->dpc_wq = create_workqueue(host_buf); in __qedf_probe()
3596 INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); in __qedf_probe()
3603 qedf->grcdump_size = in __qedf_probe()
3604 qed_ops->common->dbg_all_data_size(qedf->cdev); in __qedf_probe()
3605 if (qedf->grcdump_size) { in __qedf_probe()
3606 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump, in __qedf_probe()
3607 qedf->grcdump_size); in __qedf_probe()
3609 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_probe()
3611 qedf->grcdump = NULL; in __qedf_probe()
3614 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3616 qedf->grcdump, qedf->grcdump_size); in __qedf_probe()
3621 spin_lock_init(&qedf->io_trace_lock); in __qedf_probe()
3622 qedf->io_trace_idx = 0; in __qedf_probe()
3625 init_completion(&qedf->flogi_compl); in __qedf_probe()
3627 status = qed_ops->common->update_drv_state(qedf->cdev, true); in __qedf_probe()
3629 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_probe()
3634 status = qed_ops->common->set_link(qedf->cdev, &link_params); in __qedf_probe()
3636 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n"); in __qedf_probe()
3640 fcoe_ctlr_link_up(&qedf->ctlr); in __qedf_probe()
3644 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); in __qedf_probe()
3646 clear_bit(QEDF_PROBING, &qedf->flags); in __qedf_probe()
3652 if (qedf->ll2_recv_wq) in __qedf_probe()
3653 destroy_workqueue(qedf->ll2_recv_wq); in __qedf_probe()
3654 fc_remove_host(qedf->lport->host); in __qedf_probe()
3655 scsi_remove_host(qedf->lport->host); in __qedf_probe()
3657 qedf_dbg_host_exit(&(qedf->dbg_ctx)); in __qedf_probe()
3660 qedf_cmd_mgr_free(qedf->cmd_mgr); in __qedf_probe()
3662 qed_ops->stop(qedf->cdev); in __qedf_probe()
3667 qed_ops->common->slowpath_stop(qedf->cdev); in __qedf_probe()
3669 qed_ops->common->remove(qedf->cdev); in __qedf_probe()
3671 scsi_host_put(lport->host); in __qedf_probe()
3674 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); in __qedf_probe()
3676 clear_bit(QEDF_PROBING, &qedf->flags); in __qedf_probe()
3702 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in __qedf_remove()
3703 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n"); in __qedf_remove()
3708 set_bit(QEDF_UNLOADING, &qedf->flags); in __qedf_remove()
3712 fcoe_ctlr_link_down(&qedf->ctlr); in __qedf_remove()
3714 fc_fabric_logoff(qedf->lport); in __qedf_remove()
3717 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); in __qedf_remove()
3720 qedf_dbg_host_exit(&(qedf->dbg_ctx)); in __qedf_remove()
3724 cancel_delayed_work_sync(&qedf->link_update); in __qedf_remove()
3725 destroy_workqueue(qedf->link_update_wq); in __qedf_remove()
3726 qedf->link_update_wq = NULL; in __qedf_remove()
3728 if (qedf->timer_work_queue) in __qedf_remove()
3729 destroy_workqueue(qedf->timer_work_queue); in __qedf_remove()
3732 clear_bit(QEDF_LL2_STARTED, &qedf->flags); in __qedf_remove()
3733 qed_ops->ll2->stop(qedf->cdev); in __qedf_remove()
3734 if (qedf->ll2_recv_wq) in __qedf_remove()
3735 destroy_workqueue(qedf->ll2_recv_wq); in __qedf_remove()
3746 qedf_free_grc_dump_buf(&qedf->grcdump); in __qedf_remove()
3750 fcoe_ctlr_destroy(&qedf->ctlr); in __qedf_remove()
3751 fc_lport_destroy(qedf->lport); in __qedf_remove()
3752 fc_remove_host(qedf->lport->host); in __qedf_remove()
3753 scsi_remove_host(qedf->lport->host); in __qedf_remove()
3756 qedf_cmd_mgr_free(qedf->cmd_mgr); in __qedf_remove()
3759 fc_exch_mgr_free(qedf->lport); in __qedf_remove()
3760 fc_lport_free_stats(qedf->lport); in __qedf_remove()
3770 qed_ops->stop(qedf->cdev); in __qedf_remove()
3773 if (qedf->dpc_wq) { in __qedf_remove()
3775 destroy_workqueue(qedf->dpc_wq); in __qedf_remove()
3776 qedf->dpc_wq = NULL; in __qedf_remove()
3783 qed_ops->common->set_power_state(qedf->cdev, PCI_D0); in __qedf_remove()
3787 rc = qed_ops->common->update_drv_state(qedf->cdev, false); in __qedf_remove()
3789 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_remove()
3792 qed_ops->common->slowpath_stop(qedf->cdev); in __qedf_remove()
3793 qed_ops->common->remove(qedf->cdev); in __qedf_remove()
3795 mempool_destroy(qedf->io_mempool); in __qedf_remove()
3799 scsi_host_put(qedf->lport->host); in __qedf_remove()
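/*
 * Removal order above: mark the instance unloading, take the FIP controller
 * and fabric down, flush and destroy the work queues, stop LL2 and the FCoE
 * function, release the libfc/SCSI host resources, then stop the slowpath
 * and detach from qed before dropping the final host reference.
 */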
3805 if (!atomic_read(&pdev->enable_cnt)) in qedf_remove()
3816 QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n"); in qedf_wq_grcdump()
3824 QEDF_ERR(&(qedf->dbg_ctx), in qedf_schedule_hw_err_handler()
3828 if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { in qedf_schedule_hw_err_handler()
3829 QEDF_ERR(&(qedf->dbg_ctx), in qedf_schedule_hw_err_handler()
3836 schedule_delayed_work(&qedf->board_disable_work, 0); in qedf_schedule_hw_err_handler()
3843 qed_ops->common->attn_clr_enable(qedf->cdev, true); in qedf_schedule_hw_err_handler()
3847 qed_ops->common->attn_clr_enable(qedf->cdev, true); in qedf_schedule_hw_err_handler()
3850 qed_ops->common->recovery_process(qedf->cdev); in qedf_schedule_hw_err_handler()
3875 if (test_bit(QEDF_PROBING, &qedf->flags)) { in qedf_get_protocol_tlv_data()
3876 QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); in qedf_get_protocol_tlv_data()
3880 lport = qedf->lport; in qedf_get_protocol_tlv_data()
3881 host = lport->host; in qedf_get_protocol_tlv_data()
3887 fcoe->qos_pri_set = true; in qedf_get_protocol_tlv_data()
3888 fcoe->qos_pri = 3; /* Hard coded to 3 in driver */ in qedf_get_protocol_tlv_data()
3890 fcoe->ra_tov_set = true; in qedf_get_protocol_tlv_data()
3891 fcoe->ra_tov = lport->r_a_tov; in qedf_get_protocol_tlv_data()
3893 fcoe->ed_tov_set = true; in qedf_get_protocol_tlv_data()
3894 fcoe->ed_tov = lport->e_d_tov; in qedf_get_protocol_tlv_data()
3896 fcoe->npiv_state_set = true; in qedf_get_protocol_tlv_data()
3897 fcoe->npiv_state = 1; /* NPIV always enabled */ in qedf_get_protocol_tlv_data()
3899 fcoe->num_npiv_ids_set = true; in qedf_get_protocol_tlv_data()
3900 fcoe->num_npiv_ids = fc_host->npiv_vports_inuse; in qedf_get_protocol_tlv_data()
3903 if (qedf->ctlr.sel_fcf) { in qedf_get_protocol_tlv_data()
3904 fcoe->switch_name_set = true; in qedf_get_protocol_tlv_data()
3905 u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name); in qedf_get_protocol_tlv_data()
3908 fcoe->port_state_set = true; in qedf_get_protocol_tlv_data()
3910 if (lport->link_up) in qedf_get_protocol_tlv_data()
3911 fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC; in qedf_get_protocol_tlv_data()
3913 fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE; in qedf_get_protocol_tlv_data()
3915 fcoe->link_failures_set = true; in qedf_get_protocol_tlv_data()
3916 fcoe->link_failures = (u16)hst->link_failure_count; in qedf_get_protocol_tlv_data()
3918 fcoe->fcoe_txq_depth_set = true; in qedf_get_protocol_tlv_data()
3919 fcoe->fcoe_rxq_depth_set = true; in qedf_get_protocol_tlv_data()
3920 fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS; in qedf_get_protocol_tlv_data()
3921 fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS; in qedf_get_protocol_tlv_data()
3923 fcoe->fcoe_rx_frames_set = true; in qedf_get_protocol_tlv_data()
3924 fcoe->fcoe_rx_frames = hst->rx_frames; in qedf_get_protocol_tlv_data()
3926 fcoe->fcoe_tx_frames_set = true; in qedf_get_protocol_tlv_data()
3927 fcoe->fcoe_tx_frames = hst->tx_frames; in qedf_get_protocol_tlv_data()
3929 fcoe->fcoe_rx_bytes_set = true; in qedf_get_protocol_tlv_data()
3930 fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000; in qedf_get_protocol_tlv_data()
3932 fcoe->fcoe_tx_bytes_set = true; in qedf_get_protocol_tlv_data()
3933 fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000; in qedf_get_protocol_tlv_data()
3935 fcoe->crc_count_set = true; in qedf_get_protocol_tlv_data()
3936 fcoe->crc_count = hst->invalid_crc_count; in qedf_get_protocol_tlv_data()
3938 fcoe->tx_abts_set = true; in qedf_get_protocol_tlv_data()
3939 fcoe->tx_abts = hst->fcp_packet_aborts; in qedf_get_protocol_tlv_data()
3941 fcoe->tx_lun_rst_set = true; in qedf_get_protocol_tlv_data()
3942 fcoe->tx_lun_rst = qedf->lun_resets; in qedf_get_protocol_tlv_data()
3944 fcoe->abort_task_sets_set = true; in qedf_get_protocol_tlv_data()
3945 fcoe->abort_task_sets = qedf->packet_aborts; in qedf_get_protocol_tlv_data()
3947 fcoe->scsi_busy_set = true; in qedf_get_protocol_tlv_data()
3948 fcoe->scsi_busy = qedf->busy; in qedf_get_protocol_tlv_data()
3950 fcoe->scsi_tsk_full_set = true; in qedf_get_protocol_tlv_data()
3951 fcoe->scsi_tsk_full = qedf->task_set_fulls; in qedf_get_protocol_tlv_data()
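/*
 * Every TLV value is paired with a *_set flag so the management firmware
 * can tell which fields the driver actually reported; the statistics come
 * from the libfc host stats plus qedf's own reset, abort, busy and
 * task-set-full counters.
 */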
3964 QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n"); in qedf_stag_change_work()
3965 qedf_ctx_soft_reset(qedf->lport); in qedf_stag_change_work()
3980 QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n"); in qedf_schedule_recovery_handler()
3981 schedule_delayed_work(&qedf->recovery_work, 0); in qedf_schedule_recovery_handler()
3989 if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags)) in qedf_recovery_handler()
3993 * Call common_ops->recovery_prolog to allow the MFW to quiesce in qedf_recovery_handler()
3996 qed_ops->common->recovery_prolog(qedf->cdev); in qedf_recovery_handler()
3998 QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n"); in qedf_recovery_handler()
3999 __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY); in qedf_recovery_handler()
4005 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); in qedf_recovery_handler()
4006 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); in qedf_recovery_handler()
4007 __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY); in qedf_recovery_handler()
4008 clear_bit(QEDF_IN_RECOVERY, &qedf->flags); in qedf_recovery_handler()
4009 QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n"); in qedf_recovery_handler()
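/*
 * Recovery sequence: guard against re-entry with QEDF_IN_RECOVERY, let the
 * MFW quiesce via recovery_prolog(), tear the function down with
 * __qedf_remove(QEDF_MODE_RECOVERY), then re-run __qedf_probe() in recovery
 * mode with the link and DCBX state reset to down/pending.
 */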
4025 ether_addr_copy(data->mac[0], qedf->mac); in qedf_get_generic_tlv_data()
4044 if (qedf_default_prio > -1) in qedf_init()
4118 return -EINVAL; in qedf_init()