Lines Matching +full:rx +full:- +full:tx

 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.

/* in bna_ib_coalescing_timeo_set() */
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);

/* in the bna_rxf_vlan_cfg_soft_reset() macro */
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;	\
	(rxf)->vlan_strip_pending = true;			\

/* in the bna_rxf_rss_cfg_soft_reset() macro */
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)		\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |	\
				BNA_RSS_F_CFG_PENDING |		\
				BNA_RSS_F_STATUS_PENDING);	\

/* in bna_rxf_sm_stopped() */
	if (rxf->flags & BNA_RXF_F_PAUSED) {
	/* No-op */
	rxf->flags |= BNA_RXF_F_PAUSED;
	rxf->flags &= ~BNA_RXF_F_PAUSED;

/* in bna_rxf_sm_paused() */
	rxf->flags &= ~BNA_RXF_F_PAUSED;

/* in bna_rxf_sm_cfg_wait() */
	/* No-op */
	rxf->flags |= BNA_RXF_F_PAUSED;

/* in bna_rxf_sm_started() */
	rxf->flags |= BNA_RXF_F_PAUSED;
/* in bna_bfi_ucast_req() */
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
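Every bna_bfi_*_req() builder below follows the same mailbox recipe: stamp the message header, record how many fixed-size queue entries the command occupies, fill the payload, then post the buffer to the IOC message queue. A minimal self-contained sketch of that flow — the struct layout, entry size, and msgq_post() are illustrative stand-ins, not the real BFI definitions:

#include <arpa/inet.h>	/* htons */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the BFI message types. */
struct msg_hdr { uint8_t msg_class, msg_id; uint16_t num_entries; };
struct ucast_req { struct msg_hdr mh; uint8_t mac_addr[6]; };

/* One mailbox "entry" is a fixed-size slot; a command may span several. */
enum { MSGQ_ENTRY_SIZE = 64 };
static uint16_t num_cmd_entries(size_t len)
{
	return (uint16_t)((len + MSGQ_ENTRY_SIZE - 1) / MSGQ_ENTRY_SIZE);
}

static void msgq_post(const void *cmd, size_t len)
{
	(void)cmd;
	printf("posting %zu-byte command\n", len);	/* stand-in for bfa_msgq_cmd_post() */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	struct ucast_req req = { { 0, 0, 0 }, { 0 } };

	req.mh.msg_class = 1;				/* e.g. BFI_MC_ENET */
	req.mh.msg_id = 2;				/* e.g. a unicast-set request */
	req.mh.num_entries = htons(num_cmd_entries(sizeof(req)));
	memcpy(req.mac_addr, mac, sizeof(req.mac_addr));
	msgq_post(&req, sizeof(req));
	return 0;
}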
/* in bna_bfi_mcast_add_req() */
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_mcast_del_req() */
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_mcast_filter_req() */
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_rx_promisc_req() */
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_rx_vlan_filter_set() */
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_vlan_strip_enable() */
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_rit_cfg() */
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_rss_cfg() */
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* in bna_bfi_rss_enable() */
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
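bna_bfi_rss_cfg() sends the Toeplitz hash key one 32-bit word at a time, byte-swapped to wire order with htonl(). A standalone illustration of that per-word conversion — the 10-word key length and the key bytes are assumptions for the example, not the BFI definition:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

enum { RSS_KEY_WORDS = 10 };	/* assumed: a 40-byte Toeplitz key */

int main(void)
{
	uint32_t host_key[RSS_KEY_WORDS] = { 0x6d5a6d5a /* rest zero */ };
	uint32_t wire_key[RSS_KEY_WORDS];

	/* Convert each word to big-endian, as bna_bfi_rss_cfg() does. */
	for (int i = 0; i < RSS_KEY_WORDS; i++)
		wire_key[i] = htonl(host_key[i]);

	printf("word 0: host 0x%08x -> wire 0x%08x\n",
	       (unsigned)host_key[0], (unsigned)wire_key[0]);
	return 0;
}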
/* in bna_rxf_mcmac_get() */
	list_for_each(qe, &rxf->mcast_active_q) {
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))

/* in bna_rxf_mchandle_get() */
	list_for_each(qe, &rxf->mcast_handle_q) {
		if (mchandle->handle == handle)

/* in bna_rxf_mchandle_attach() */
	mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
	mchandle->handle = handle;
	mchandle->refcnt = 0;
	list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	mchandle->refcnt++;
	mcmac->handle = mchandle;

/* in bna_rxf_mcast_del() */
	mchandle = mac->handle;
	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		bna_bfi_mcast_del_req(rxf, mchandle->handle);
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	mac->handle = NULL;

/* in bna_rxf_mcast_cfg_apply() */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);

	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);

/* in bna_rxf_vlan_cfg_apply() */
	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
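The pending bitmask tracks which of the eight 512-VLAN blocks still need to be pushed to hardware; each apply pass finds the lowest set bit, clears it, and sends just that block. A standalone sketch of that scan (the block count of 8 is what a u8 bitmask over 4096 VLANs implies):

#include <stdint.h>
#include <stdio.h>

/* Find and clear the lowest pending block, as bna_rxf_vlan_cfg_apply()
 * does before sending one VLAN-set request per dirty block. */
static int next_pending_block(uint8_t *pending)
{
	int block_idx = 0;
	uint8_t bits = *pending;

	if (bits == 0)
		return -1;		/* nothing left to flush */
	while (!(bits & 0x1)) {		/* scan for the lowest set bit */
		block_idx++;
		bits >>= 1;
	}
	*pending &= (uint8_t)~(1u << block_idx);
	return block_idx;
}

int main(void)
{
	uint8_t pending = 0xFF;		/* BFI_VLAN_BMASK_ALL: all 8 blocks dirty */
	int blk;

	while ((blk = next_pending_block(&pending)) >= 0)
		printf("flush VLAN block %d\n", blk);
	return 0;
}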
/* in bna_rxf_mcast_cfg_reset() */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);

	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);

/* in bna_rxf_rss_cfg_apply() */
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;

/* in bna_rit_init() */
	struct bna_rx *rx = rxf->rx;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxf->rit[offset] = rxp->cq.ccb->id;
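bna_rit_init() sizes the redirection table and then walks the rx-path queue, recording each path's CQ id (rxp->cq.ccb->id) in consecutive RIT slots. A standalone model of that fill, with made-up CQ ids and table size:

#include <stdio.h>

int main(void)
{
	unsigned char rit[8];			/* RSS redirection table */
	int cq_id[4] = { 10, 11, 12, 13 };	/* per-path CQ ids (illustrative) */
	int num_paths = 4;
	int offset = 0;

	/* One RIT entry per rx-path, as in bna_rit_init(). */
	for (int p = 0; p < num_paths; p++)
		rit[offset++] = (unsigned char)cq_id[p];

	for (int i = 0; i < offset; i++)
		printf("rit[%d] = %d\n", i, rit[i]);
	return 0;
}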
/* in bna_bfi_rxf_mcast_add_rsp() */
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
/* in bna_rxf_init() */
bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
		struct bna_rx_config *q_config, struct bna_res_info *res_info)

	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

/* in bna_rxf_uninit() */
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;

bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

/* in bna_rxf_start() */
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;

bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

/* in bna_rxf_stop() */
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		cbfn(rx->bna->bnad, rx);

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	/* Allocate nodes for the new list */
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
	}

	/* Add the new entries */
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	/* error path: free the nodes allocated above */
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);

bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);

bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
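The two VLAN helpers above index the filter with three derived values: a 32-bit word index, a bit within that word, and a 512-VLAN block id that marks the pending bitmask. The shift constants below (5 and 9) are what a u32 table and u8 bitmask over 4096 VLANs imply; the driver spells them with BFI_* macros. A quick standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vlan_filter_table[4096 / 32] = { 0 };
	uint8_t vlan_pending_bitmask = 0;
	int vlan_id = 1234;

	int index = vlan_id >> 5;		/* which 32-bit word */
	uint32_t bit = 1u << (vlan_id & 0x1f);	/* which bit in that word */
	int group_id = vlan_id >> 9;		/* which 512-VLAN block */

	vlan_filter_table[index] |= bit;	/* bna_rx_vlan_add() step */
	vlan_pending_bitmask |= (uint8_t)(1 << group_id);

	printf("vlan %d -> word %d, bit %d, block %d (pending 0x%02x)\n",
	       vlan_id, index, vlan_id & 0x1f, group_id, vlan_pending_bitmask);
	return 0;
}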
/* in bna_rxf_ucast_cfg_apply() */
	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	/* Set the default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
	}

/* in bna_rxf_ucast_cfg_reset() */
	/* Throw away pending-delete entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		/* soft cleanup: just recycle the entry */
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		/* hard cleanup: issue a delete request, then recycle */
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	/* Move active entries back to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
	}

	/* Move the default (active) MAC back to the pending set */
	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
	}

/* in bna_rxf_promisc_cfg_apply() */
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
	} else if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
	}

/* in bna_rxf_promisc_cfg_reset() */
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
	}

/* in bna_rxf_allmulti_cfg_apply() */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}

/* in bna_rxf_allmulti_cfg_reset() */
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}
/* in bna_rxf_promisc_enable() */
	struct bna *bna = rxf->rx->bna;

	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
	}

/* in bna_rxf_promisc_disable() */
	struct bna *bna = rxf->rx->bna;

	if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	}

/* in bna_rxf_allmulti_enable() */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	}

/* in bna_rxf_allmulti_disable() */
	if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	}

/* in bna_rxf_vlan_strip_cfg_apply() */
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
	}
/*
 * RX
 */

#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
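BNA_GET_RXQS doubles the queue count for paired rxp types (large/small or header/data), and the page-count macro whose tail survived in the listing is a ceil-division of a byte size into PAGE_SIZE pages. A standalone check of both computations (4 KiB pages assumed; the SIZE_TO_PAGES name follows the reconstruction above):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same rounding as the driver macro: pages = ceil(size / PAGE_SIZE). */
#define SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

int main(void)
{
	int num_paths = 4;
	int single = 0;			/* 0: paired rxp type (slr or hds) */
	int rxqs = single ? num_paths : num_paths * 2;

	printf("%d paths -> %d rxqs\n", num_paths, rxqs);
	printf("10000 bytes -> %lu pages\n", SIZE_TO_PAGES(10000UL));
	return 0;
}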
#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)

#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)

static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
	call_rx_stop_cbfn(rx);
	/* no-op */

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void bna_rx_sm_stop_wait_entry(struct bna_rx *rx)

static void bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
	bna_rx_enet_stop(rx);

static void bna_rx_sm_start_wait(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
	bfa_fsm_set_state(rx, bna_rx_sm_stopped);
	bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)

static void bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
	bna_rxf_fail(&rx->rxf);
	call_rx_stall_cbfn(rx);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
	bna_rxf_stop(&rx->rxf);
	bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
	call_rx_stall_cbfn(rx);
	bna_rx_enet_stop(rx);

static void bna_rx_sm_started_entry(struct bna_rx *rx)
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);

static void bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
	bna_ethport_cb_rx_stopped(&rx->bna->ethport);
	bna_rxf_stop(&rx->rxf);
	bfa_fsm_set_state(rx, bna_rx_sm_failed);
	bna_ethport_cb_rx_stopped(&rx->bna->ethport);
	bna_rxf_fail(&rx->rxf);
	call_rx_stall_cbfn(rx);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
	bfa_fsm_set_state(rx, bna_rx_sm_failed);
	bna_rxf_fail(&rx->rxf);
	call_rx_stall_cbfn(rx);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
	bfa_fsm_set_state(rx, bna_rx_sm_started);

static void bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)

static void bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
	/* No-op */
	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

static void bna_rx_sm_failed_entry(struct bna_rx *rx)

static void bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
	/* No-op */
	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

static void bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)

static void bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
	bfa_fsm_set_state(rx, bna_rx_sm_failed);
	bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
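All the bna_rx_sm_* handlers above hang off a function-pointer state machine: bfa_fsm_set_state() swaps the current handler and bfa_fsm_send_event() dispatches to it. A minimal standalone model of that dispatch pattern (two states only and no entry actions; the driver's Rx graph is larger):

#include <stdio.h>

struct rx;
typedef void (*rx_fsm_t)(struct rx *, int event);
struct rx { rx_fsm_t fsm; };

enum { RX_E_START = 1, RX_E_STOP = 2 };

static void sm_stopped(struct rx *rx, int event);
static void sm_started(struct rx *rx, int event);

/* bfa_fsm_set_state() analogue: swap the handler pointer. */
static void set_state(struct rx *rx, rx_fsm_t state) { rx->fsm = state; }
/* bfa_fsm_send_event() analogue: dispatch to the current handler. */
static void send_event(struct rx *rx, int event) { rx->fsm(rx, event); }

static void sm_stopped(struct rx *rx, int event)
{
	if (event == RX_E_START) {
		printf("stopped -> started\n");
		set_state(rx, sm_started);
	}
}

static void sm_started(struct rx *rx, int event)
{
	if (event == RX_E_STOP) {
		printf("started -> stopped\n");
		set_state(rx, sm_stopped);
	}
}

int main(void)
{
	struct rx rx = { sm_stopped };

	send_event(&rx, RX_E_START);
	send_event(&rx, RX_E_STOP);
	return 0;
}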
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			q0->buffer_size =
				bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED
				: BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;
	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;
	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}
/* in bna_rx_res_check() */
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

/* in bna_rxq_get() */
	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	bfa_q_qe_init(&rxq->qe);

/* in bna_rxq_put() */
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;

/* in bna_rxp_get() */
	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	bfa_q_qe_init(&rxp->qe);

/* in bna_rxp_put() */
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;

/* in bna_rx_get() */
	struct bna_rx *rx = NULL;

	/* regular rx takes the head, loopback takes the tail */
	bfa_q_deq(&rx_mod->rx_free_q, &qe);
	bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	rx->type = type;

	return rx;

static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}
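bna_rx_put() re-inserts a freed rx into rx_free_q sorted by rid, which is what lets bna_rx_get() hand regular allocations the lowest rid from the head while loopback allocations take from the tail. A standalone sketch of the same insert-sorted free list (singly linked for brevity; the driver splices a doubly linked list):

#include <stdio.h>

/* Insert-sorted free list keyed by rid, modeling bna_rx_put(). */
struct node { int rid; struct node *next; };

static void put_sorted(struct node **head, struct node *n)
{
	struct node **pp = head;

	while (*pp && (*pp)->rid < n->rid)	/* find first rid >= n->rid */
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;
}

int main(void)
{
	struct node nodes[4] = { {2, 0}, {0, 0}, {3, 0}, {1, 0} };
	struct node *free_q = NULL;

	for (int i = 0; i < 4; i++)
		put_sorted(&free_q, &nodes[i]);

	/* bna_rx_get() analogue: popping the head yields the lowest rid */
	for (struct node *n = free_q; n; n = n->next)
		printf("rid %d\n", n->rid);
	return 0;
}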
/* in bna_rxp_add_rxqs() */
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	}

/* in bna_rxq_qpt_setup() */
	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
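A queue page table is an array of 64-bit DMA addresses stored as lsb/msb halves, with a parallel kernel-virtual shadow (sw_qpt) so the driver can touch pages without reverse-translating bus addresses. A standalone sketch of filling such a pair of tables — malloc and a pointer cast stand in for real DMA-coherent allocation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_addr { uint32_t lsb, msb; };	/* split 64-bit bus address */

int main(void)
{
	enum { PAGES = 4 };
	struct dma_addr qpt[PAGES];	/* device-visible page table */
	void *sw_qpt[PAGES];		/* driver-side shadow pointers */

	for (int i = 0; i < PAGES; i++) {
		void *page = malloc(4096);		/* stand-in DMA page */
		uint64_t bus = (uint64_t)(uintptr_t)page; /* fake bus addr */

		sw_qpt[i] = page;
		qpt[i].lsb = (uint32_t)bus;
		qpt[i].msb = (uint32_t)(bus >> 32);
	}
	printf("page 0 at %p (lsb 0x%08x)\n", sw_qpt[0], (unsigned)qpt[0].lsb);
	for (int i = 0; i < PAGES; i++)
		free(sw_qpt[i]);
	return 0;
}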
/* in bna_rxp_cqpt_setup() */
	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}

bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

/* in bna_rx_mod_cb_rx_stopped_all() */
	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}

/* in bna_rx_mod_start() */
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}

/* in bna_rx_mod_stop() */
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
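The stop path above counts outstanding Rx stops with a waiting counter: bfa_wc_init() primes it with a completion callback, bfa_wc_up() adds one reference per rx being stopped, each stop completion calls bfa_wc_down(), and bfa_wc_wait() drops the initial reference so the callback fires exactly once after the last stop. A single-threaded standalone model of that idiom:

#include <stdio.h>

/* Minimal waiting-counter, modeling bfa_wc_init/up/down/wait. */
struct wc {
	int count;
	void (*done)(void *arg);
	void *arg;
};

static void wc_init(struct wc *wc, void (*done)(void *), void *arg)
{
	wc->count = 1;		/* initial reference, released by wc_wait() */
	wc->done = done;
	wc->arg = arg;
}
static void wc_up(struct wc *wc) { wc->count++; }
static void wc_down(struct wc *wc)
{
	if (--wc->count == 0)
		wc->done(wc->arg);	/* last user gone: run completion */
}
static void wc_wait(struct wc *wc) { wc_down(wc); }

static void all_stopped(void *arg)
{
	(void)arg;
	printf("all rx stopped\n");
}

int main(void)
{
	struct wc wc;
	int num_rx = 3;

	wc_init(&wc, all_stopped, NULL);
	for (int i = 0; i < num_rx; i++)
		wc_up(&wc);		/* one per rx being stopped */
	for (int i = 0; i < num_rx; i++)
		wc_down(&wc);		/* each rx reports stopped */
	wc_wait(&wc);			/* drop initial ref -> callback */
	return 0;
}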
/* in bna_rx_mod_fail() */
	struct bna_rx *rx;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
/* in bna_rx_mod_init() */
	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->bna = NULL;
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue (two RxQs per path) */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}

/* in bna_rx_mod_uninit() */
	/* Sanity-walk the free lists before tearing down */
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		q1->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
		q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}

bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}
/* in bna_rx_res_req() */
	dq_depth = q_cfg->q_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);

	/* Header pages are only needed for paired (non-single) rxp types */
	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {

	/* CCB structures */
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	/* RCB structures */
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	/* CQ page table */
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* CQ software page table */
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* CQ pages */
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = cpage_count * q_cfg->num_paths;

	/* Data queue page table */
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Data queue software page table */
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Data queue pages */
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = dpage_count * q_cfg->num_paths;

	/* Header queue page table */
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Header queue software page table */
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Header queue pages */
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = (hpage_count ? PAGE_SIZE : 0);
	mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);

	/* IB index segment */
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	/* RIT */
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2322 struct bna_rx_mod *rx_mod = &bna->rx_mod; in bna_rx_create()
2323 struct bna_rx *rx; in bna_rx_create() local
2362 rx_cfg->num_paths; in bna_rx_create()
2365 rx_cfg->num_paths; in bna_rx_create()
2368 rx_cfg->num_paths; in bna_rx_create()
2370 rx = bna_rx_get(rx_mod, rx_cfg->rx_type); in bna_rx_create()
2371 rx->bna = bna; in bna_rx_create()
2372 rx->rx_flags = 0; in bna_rx_create()
2373 INIT_LIST_HEAD(&rx->rxp_q); in bna_rx_create()
2374 rx->stop_cbfn = NULL; in bna_rx_create()
2375 rx->stop_cbarg = NULL; in bna_rx_create()
2376 rx->priv = priv; in bna_rx_create()
2378 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; in bna_rx_create()
2379 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; in bna_rx_create()
2380 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; in bna_rx_create()
2381 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; in bna_rx_create()
2382 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; in bna_rx_create()
2384 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; in bna_rx_create()
2385 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; in bna_rx_create()
2387 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { in bna_rx_create()
2388 switch (rx->type) { in bna_rx_create()
2390 if (!(rx->bna->rx_mod.flags & in bna_rx_create()
2392 rx->rx_flags |= BNA_RX_F_ENET_STARTED; in bna_rx_create()
2395 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) in bna_rx_create()
2396 rx->rx_flags |= BNA_RX_F_ENET_STARTED; in bna_rx_create()
2401 rx->num_paths = rx_cfg->num_paths; in bna_rx_create()
2402 for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) { in bna_rx_create()
2404 list_add_tail(&rxp->qe, &rx->rxp_q); in bna_rx_create()
2405 rxp->type = rx_cfg->rxp_type; in bna_rx_create()
2406 rxp->rx = rx; in bna_rx_create()
2407 rxp->cq.rx = rx; in bna_rx_create()
2410 if (BNA_RXP_SINGLE == rx_cfg->rxp_type) in bna_rx_create()
2415 if (1 == intr_info->num) in bna_rx_create()
2416 rxp->vector = intr_info->idl[0].vector; in bna_rx_create()
2418 rxp->vector = intr_info->idl[i].vector; in bna_rx_create()
2422 rxp->cq.ib.ib_seg_host_addr.lsb = in bna_rx_create()
2424 rxp->cq.ib.ib_seg_host_addr.msb = in bna_rx_create()
2426 rxp->cq.ib.ib_seg_host_addr_kva = in bna_rx_create()
2428 rxp->cq.ib.intr_type = intr_info->intr_type; in bna_rx_create()
2429 if (intr_info->intr_type == BNA_INTR_T_MSIX) in bna_rx_create()
2430 rxp->cq.ib.intr_vector = rxp->vector; in bna_rx_create()
2432 rxp->cq.ib.intr_vector = (1 << rxp->vector); in bna_rx_create()
2433 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; in bna_rx_create()
2434 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; in bna_rx_create()
2435 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; in bna_rx_create()
2441 q0->rx = rx; in bna_rx_create()
2442 q0->rxp = rxp; in bna_rx_create()
2444 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; in bna_rx_create()
2445 q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; in bna_rx_create()
2447 q0->rcb->q_depth = rx_cfg->q_depth; in bna_rx_create()
2448 q0->rcb->rxq = q0; in bna_rx_create()
2449 q0->rcb->bnad = bna->bnad; in bna_rx_create()
2450 q0->rcb->id = 0; in bna_rx_create()
2451 q0->rx_packets = q0->rx_bytes = 0; in bna_rx_create()
2452 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; in bna_rx_create()
2456 q0->rcb->page_idx = dpage_idx; in bna_rx_create()
2457 q0->rcb->page_count = dpage_count; in bna_rx_create()
2460 if (rx->rcb_setup_cbfn) in bna_rx_create()
2461 rx->rcb_setup_cbfn(bnad, q0->rcb); in bna_rx_create()
2466 q1->rx = rx; in bna_rx_create()
2467 q1->rxp = rxp; in bna_rx_create()
2469 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; in bna_rx_create()
2470 q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; in bna_rx_create()
2472 q1->rcb->q_depth = rx_cfg->q_depth; in bna_rx_create()
2473 q1->rcb->rxq = q1; in bna_rx_create()
2474 q1->rcb->bnad = bna->bnad; in bna_rx_create()
2475 q1->rcb->id = 1; in bna_rx_create()
2476 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? in bna_rx_create()
2477 rx_cfg->hds_config.forced_offset in bna_rx_create()
2478 : rx_cfg->small_buff_size; in bna_rx_create()
2479 q1->rx_packets = q1->rx_bytes = 0; in bna_rx_create()
2480 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; in bna_rx_create()
2485 q1->rcb->page_idx = hpage_idx; in bna_rx_create()
2486 q1->rcb->page_count = hpage_count; in bna_rx_create()
2489 if (rx->rcb_setup_cbfn) in bna_rx_create()
2490 rx->rcb_setup_cbfn(bnad, q1->rcb); in bna_rx_create()
2495 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; in bna_rx_create()
2496 rxp->cq.ccb->q_depth = rx_cfg->q_depth + in bna_rx_create()
2497 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? in bna_rx_create()
2498 0 : rx_cfg->q_depth); in bna_rx_create()
2499 rxp->cq.ccb->cq = &rxp->cq; in bna_rx_create()
2500 rxp->cq.ccb->rcb[0] = q0->rcb; in bna_rx_create()
2501 q0->rcb->ccb = rxp->cq.ccb; in bna_rx_create()
2503 rxp->cq.ccb->rcb[1] = q1->rcb; in bna_rx_create()
2504 q1->rcb->ccb = rxp->cq.ccb; in bna_rx_create()
2506 rxp->cq.ccb->hw_producer_index = in bna_rx_create()
2507 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; in bna_rx_create()
2508 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; in bna_rx_create()
2509 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; in bna_rx_create()
2510 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; in bna_rx_create()
2511 rxp->cq.ccb->rx_coalescing_timeo = in bna_rx_create()
2512 rxp->cq.ib.coalescing_timeo; in bna_rx_create()
2513 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; in bna_rx_create()
2514 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; in bna_rx_create()
2515 rxp->cq.ccb->bnad = bna->bnad; in bna_rx_create()
2516 rxp->cq.ccb->id = i; in bna_rx_create()
2520 rxp->cq.ccb->page_idx = cpage_idx; in bna_rx_create()
2521 rxp->cq.ccb->page_count = page_count; in bna_rx_create()
2524 if (rx->ccb_setup_cbfn) in bna_rx_create()
2525 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); in bna_rx_create()
2528 rx->hds_cfg = rx_cfg->hds_config; in bna_rx_create()
2530 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); in bna_rx_create()
2532 bfa_fsm_set_state(rx, bna_rx_sm_stopped); in bna_rx_create()
2534 rx_mod->rid_mask |= (1 << rx->rid); in bna_rx_create()
2536 return rx; in bna_rx_create()
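
/*
 * Illustrative sketch (not driver code; the helper name is made up):
 * the completion-queue sizing rule applied in bna_rx_create() above.
 * A BNA_RXP_SINGLE path posts completions from one RxQ, so its CQ only
 * needs q_depth entries; the two-queue RxP types feed a pair of RxQs
 * into one CQ, which therefore needs 2 * q_depth.
 */
static unsigned int example_cq_depth(unsigned int q_depth, int is_single_rxp)
{
	return is_single_rxp ? q_depth : 2 * q_depth;
}
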
2540 bna_rx_destroy(struct bna_rx *rx) in bna_rx_destroy() argument
2542 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; in bna_rx_destroy()
2548 bna_rxf_uninit(&rx->rxf); in bna_rx_destroy()
2550 while (!list_empty(&rx->rxp_q)) { in bna_rx_destroy()
2551 bfa_q_deq(&rx->rxp_q, &rxp); in bna_rx_destroy()
2553 if (rx->rcb_destroy_cbfn) in bna_rx_destroy()
2554 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); in bna_rx_destroy()
2555 q0->rcb = NULL; in bna_rx_destroy()
2556 q0->rxp = NULL; in bna_rx_destroy()
2557 q0->rx = NULL; in bna_rx_destroy()
2561 if (rx->rcb_destroy_cbfn) in bna_rx_destroy()
2562 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); in bna_rx_destroy()
2563 q1->rcb = NULL; in bna_rx_destroy()
2564 q1->rxp = NULL; in bna_rx_destroy()
2565 q1->rx = NULL; in bna_rx_destroy()
2568 rxp->rxq.slr.large = NULL; in bna_rx_destroy()
2569 rxp->rxq.slr.small = NULL; in bna_rx_destroy()
2571 if (rx->ccb_destroy_cbfn) in bna_rx_destroy()
2572 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); in bna_rx_destroy()
2573 rxp->cq.ccb = NULL; in bna_rx_destroy()
2574 rxp->rx = NULL; in bna_rx_destroy()
2578 list_for_each(qe, &rx_mod->rx_active_q) { in bna_rx_destroy()
2579 if (qe == &rx->qe) { in bna_rx_destroy()
2580 list_del(&rx->qe); in bna_rx_destroy()
2581 bfa_q_qe_init(&rx->qe); in bna_rx_destroy()
2586 rx_mod->rid_mask &= ~(1 << rx->rid); in bna_rx_destroy()
2588 rx->bna = NULL; in bna_rx_destroy()
2589 rx->priv = NULL; in bna_rx_destroy()
2590 bna_rx_put(rx_mod, rx); in bna_rx_destroy()
2594 bna_rx_enable(struct bna_rx *rx) in bna_rx_enable() argument
2596 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) in bna_rx_enable()
2597 return; in bna_rx_enable()
2599 rx->rx_flags |= BNA_RX_F_ENABLED; in bna_rx_enable()
2600 if (rx->rx_flags & BNA_RX_F_ENET_STARTED) in bna_rx_enable()
2601 bfa_fsm_send_event(rx, RX_E_START); in bna_rx_enable()
2605 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, in bna_rx_disable() argument
2610 (*cbfn)(rx->bna->bnad, rx); in bna_rx_disable()
2612 rx->stop_cbfn = cbfn; in bna_rx_disable()
2613 rx->stop_cbarg = rx->bna->bnad; in bna_rx_disable()
2615 rx->rx_flags &= ~BNA_RX_F_ENABLED; in bna_rx_disable()
2617 bfa_fsm_send_event(rx, RX_E_STOP); in bna_rx_disable()
2622 bna_rx_cleanup_complete(struct bna_rx *rx) in bna_rx_cleanup_complete() argument
2624 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); in bna_rx_cleanup_complete()
2628 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, in bna_rx_mode_set() argument
2632 struct bna_rxf *rxf = &rx->rxf; in bna_rx_mode_set()
2639 if ((rx->bna->promisc_rid != BFI_INVALID_RID) && in bna_rx_mode_set()
2640 (rx->bna->promisc_rid != rxf->rx->rid)) in bna_rx_mode_set()
2644 if (rx->bna->default_mode_rid != BFI_INVALID_RID) in bna_rx_mode_set()
2654 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && in bna_rx_mode_set()
2655 (rx->bna->default_mode_rid != rxf->rx->rid)) { in bna_rx_mode_set()
2660 if (rx->bna->promisc_rid != BFI_INVALID_RID) in bna_rx_mode_set()
2685 rxf->cam_fltr_cbfn = cbfn; in bna_rx_mode_set()
2686 rxf->cam_fltr_cbarg = rx->bna->bnad; in bna_rx_mode_set()
2689 (*cbfn)(rx->bna->bnad, rx); in bna_rx_mode_set()
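
/*
 * Illustrative sketch of the arbitration checks above: promiscuous and
 * default RX mode are chip-wide singletons tracked by rid
 * (bna->promisc_rid / bna->default_mode_rid), so a request is honored
 * only if the slot is free (BFI_INVALID_RID) or already owned by this
 * Rx.  The helper below is a hypothetical distillation of that test.
 */
static int example_may_claim_mode(int owner_rid, int my_rid, int invalid_rid)
{
	return (owner_rid == invalid_rid) || (owner_rid == my_rid);
}
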
2698 bna_rx_vlanfilter_enable(struct bna_rx *rx) in bna_rx_vlanfilter_enable() argument
2700 struct bna_rxf *rxf = &rx->rxf; in bna_rx_vlanfilter_enable()
2702 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { in bna_rx_vlanfilter_enable()
2703 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; in bna_rx_vlanfilter_enable()
2704 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; in bna_rx_vlanfilter_enable()
2710 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) in bna_rx_coalescing_timeo_set() argument
2715 list_for_each(qe, &rx->rxp_q) { in bna_rx_coalescing_timeo_set()
2717 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; in bna_rx_coalescing_timeo_set()
2718 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); in bna_rx_coalescing_timeo_set()
2729 bna->rx_mod.dim_vector[i][j] = vector[i][j]; in bna_rx_dim_reconfig()
2735 struct bna *bna = ccb->cq->rx->bna; in bna_rx_dim_update()
2740 if ((ccb->pkt_rate.small_pkt_cnt == 0) && in bna_rx_dim_update()
2741 (ccb->pkt_rate.large_pkt_cnt == 0)) in bna_rx_dim_update()
2746 small_rt = ccb->pkt_rate.small_pkt_cnt; in bna_rx_dim_update()
2747 large_rt = ccb->pkt_rate.large_pkt_cnt; in bna_rx_dim_update()
2773 ccb->pkt_rate.small_pkt_cnt = 0; in bna_rx_dim_update()
2774 ccb->pkt_rate.large_pkt_cnt = 0; in bna_rx_dim_update()
2776 coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; in bna_rx_dim_update()
2777 ccb->rx_coalescing_timeo = coalescing_timeo; in bna_rx_dim_update()
2780 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); in bna_rx_dim_update()
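
/*
 * Illustrative sketch of the dynamic interrupt moderation step above:
 * classify the sampled packet counters into a load row and a
 * small-vs-large bias column, then look the new coalescing timeout up
 * in a 2-D table, as bna_rx_dim_update() does with
 * bna->rx_mod.dim_vector[load][bias].  The thresholds and table values
 * here are invented purely for illustration.
 */
enum { EX_LOADS = 4, EX_BIASES = 2 };

static unsigned int example_dim_timeo(unsigned int small_cnt,
				      unsigned int large_cnt)
{
	static const unsigned int dim[EX_LOADS][EX_BIASES] = {
		{ 60, 60 },	/* light load: coalesce aggressively */
		{ 30, 30 },
		{ 15, 20 },
		{ 5, 10 },	/* heavy load: deliver interrupts fast */
	};
	unsigned int total = small_cnt + large_cnt;
	unsigned int load = (total > 30000) ? 3 :
			    (total > 15000) ? 2 :
			    (total > 5000) ? 1 : 0;
	unsigned int bias = (small_cnt >= large_cnt) ? 1 : 0;

	return dim[load][bias];
}
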
2795 * TX
2797 #define call_tx_stop_cbfn(tx) \ argument
2799 if ((tx)->stop_cbfn) { \
2802 cbfn = (tx)->stop_cbfn; \
2803 cbarg = (tx)->stop_cbarg; \
2804 (tx)->stop_cbfn = NULL; \
2805 (tx)->stop_cbarg = NULL; \
2806 cbfn(cbarg, (tx)); \
2810 #define call_tx_prio_change_cbfn(tx) \ argument
2812 if ((tx)->prio_change_cbfn) { \
2814 cbfn = (tx)->prio_change_cbfn; \
2815 (tx)->prio_change_cbfn = NULL; \
2816 cbfn((tx)->bna->bnad, (tx)); \
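
/*
 * Sketch of the pattern shared by call_tx_stop_cbfn() and
 * call_tx_prio_change_cbfn() above: the callback pointer is copied and
 * cleared *before* it is invoked, so the callback may safely re-arm a
 * new request without the macro firing twice.  Types and names below
 * are illustrative stand-ins, not driver definitions.
 */
struct ex_obj;
typedef void (*ex_stop_fn)(void *cbarg, struct ex_obj *obj);

struct ex_obj {
	ex_stop_fn stop_cbfn;
	void *stop_cbarg;
};

static void ex_call_stop_cbfn(struct ex_obj *obj)
{
	if (obj->stop_cbfn) {
		ex_stop_fn cbfn = obj->stop_cbfn;
		void *cbarg = obj->stop_cbarg;

		obj->stop_cbfn = NULL;	/* claim before calling */
		obj->stop_cbarg = NULL;
		cbfn(cbarg, obj);
	}
}
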
2820 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2821 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2822 static void bna_tx_enet_stop(struct bna_tx *tx);
2850 bna_tx_sm_stopped_entry(struct bna_tx *tx) in bna_tx_sm_stopped_entry() argument
2852 call_tx_stop_cbfn(tx); in bna_tx_sm_stopped_entry()
2856 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_stopped() argument
2860 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); in bna_tx_sm_stopped()
2864 call_tx_stop_cbfn(tx); in bna_tx_sm_stopped()
2868 /* No-op */ in bna_tx_sm_stopped()
2872 call_tx_prio_change_cbfn(tx); in bna_tx_sm_stopped()
2876 /* No-op */ in bna_tx_sm_stopped()
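
/*
 * Sketch of the bfa_fsm convention these handlers rely on, inferred
 * from usage rather than the real bfa implementation: a state is a
 * function pointer stored in the object, bfa_fsm_set_state() switches
 * the pointer and runs the new state's _entry hook, and
 * bfa_fsm_send_event() dispatches through the current pointer.
 */
struct ex_tx;
typedef void (*ex_state_fn)(struct ex_tx *tx, int event);

struct ex_tx {
	ex_state_fn fsm;
};

static void ex_fsm_set_state(struct ex_tx *tx, ex_state_fn state,
			     void (*entry_fn)(struct ex_tx *tx))
{
	tx->fsm = state;
	if (entry_fn)
		entry_fn(tx);	/* e.g. bna_tx_sm_stopped_entry() */
}

static void ex_fsm_send_event(struct ex_tx *tx, int event)
{
	(*tx->fsm)(tx, event);
}
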
2885 bna_tx_sm_start_wait_entry(struct bna_tx *tx) in bna_tx_sm_start_wait_entry() argument
2887 bna_bfi_tx_enet_start(tx); in bna_tx_sm_start_wait_entry()
2891 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_start_wait() argument
2895 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); in bna_tx_sm_start_wait()
2896 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); in bna_tx_sm_start_wait()
2900 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); in bna_tx_sm_start_wait()
2901 bfa_fsm_set_state(tx, bna_tx_sm_stopped); in bna_tx_sm_start_wait()
2905 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) { in bna_tx_sm_start_wait()
2906 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | in bna_tx_sm_start_wait()
2908 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); in bna_tx_sm_start_wait()
2910 bfa_fsm_set_state(tx, bna_tx_sm_started); in bna_tx_sm_start_wait()
2914 tx->flags |= BNA_TX_F_PRIO_CHANGED; in bna_tx_sm_start_wait()
2918 tx->flags |= BNA_TX_F_BW_UPDATED; in bna_tx_sm_start_wait()
2927 bna_tx_sm_started_entry(struct bna_tx *tx) in bna_tx_sm_started_entry() argument
2931 int is_regular = (tx->type == BNA_TX_T_REGULAR); in bna_tx_sm_started_entry()
2933 list_for_each(qe, &tx->txq_q) { in bna_tx_sm_started_entry()
2935 txq->tcb->priority = txq->priority; in bna_tx_sm_started_entry()
2937 bna_ib_start(tx->bna, &txq->ib, is_regular); in bna_tx_sm_started_entry()
2939 tx->tx_resume_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started_entry()
2943 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_started() argument
2947 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); in bna_tx_sm_started()
2948 tx->tx_stall_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started()
2949 bna_tx_enet_stop(tx); in bna_tx_sm_started()
2953 bfa_fsm_set_state(tx, bna_tx_sm_failed); in bna_tx_sm_started()
2954 tx->tx_stall_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started()
2955 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_started()
2960 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); in bna_tx_sm_started()
2969 bna_tx_sm_stop_wait_entry(struct bna_tx *tx) in bna_tx_sm_stop_wait_entry() argument
2974 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_stop_wait() argument
2979 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); in bna_tx_sm_stop_wait()
2980 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_stop_wait()
2985 * We are here due to start_wait -> stop_wait transition on in bna_tx_sm_stop_wait()
2986 * TX_E_STOP event in bna_tx_sm_stop_wait()
2988 bna_tx_enet_stop(tx); in bna_tx_sm_stop_wait()
2993 /* No-op */ in bna_tx_sm_stop_wait()
3002 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) in bna_tx_sm_cleanup_wait_entry() argument
3007 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_cleanup_wait() argument
3013 /* No-op */ in bna_tx_sm_cleanup_wait()
3017 bfa_fsm_set_state(tx, bna_tx_sm_stopped); in bna_tx_sm_cleanup_wait()
3026 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) in bna_tx_sm_prio_stop_wait_entry() argument
3028 tx->tx_stall_cbfn(tx->bna->bnad, tx); in bna_tx_sm_prio_stop_wait_entry()
3029 bna_tx_enet_stop(tx); in bna_tx_sm_prio_stop_wait_entry()
3033 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_prio_stop_wait() argument
3037 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); in bna_tx_sm_prio_stop_wait()
3041 bfa_fsm_set_state(tx, bna_tx_sm_failed); in bna_tx_sm_prio_stop_wait()
3042 call_tx_prio_change_cbfn(tx); in bna_tx_sm_prio_stop_wait()
3043 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_prio_stop_wait()
3047 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); in bna_tx_sm_prio_stop_wait()
3052 /* No-op */ in bna_tx_sm_prio_stop_wait()
3061 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) in bna_tx_sm_prio_cleanup_wait_entry() argument
3063 call_tx_prio_change_cbfn(tx); in bna_tx_sm_prio_cleanup_wait_entry()
3064 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); in bna_tx_sm_prio_cleanup_wait_entry()
3068 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_prio_cleanup_wait() argument
3072 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); in bna_tx_sm_prio_cleanup_wait()
3076 bfa_fsm_set_state(tx, bna_tx_sm_failed); in bna_tx_sm_prio_cleanup_wait()
3081 /* No-op */ in bna_tx_sm_prio_cleanup_wait()
3085 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); in bna_tx_sm_prio_cleanup_wait()
3094 bna_tx_sm_failed_entry(struct bna_tx *tx) in bna_tx_sm_failed_entry() argument
3099 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_failed() argument
3103 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); in bna_tx_sm_failed()
3107 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); in bna_tx_sm_failed()
3111 /* No-op */ in bna_tx_sm_failed()
3115 bfa_fsm_set_state(tx, bna_tx_sm_stopped); in bna_tx_sm_failed()
3124 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) in bna_tx_sm_quiesce_wait_entry() argument
3129 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) in bna_tx_sm_quiesce_wait() argument
3133 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); in bna_tx_sm_quiesce_wait()
3137 bfa_fsm_set_state(tx, bna_tx_sm_failed); in bna_tx_sm_quiesce_wait()
3141 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); in bna_tx_sm_quiesce_wait()
3145 /* No-op */ in bna_tx_sm_quiesce_wait()
3154 bna_bfi_tx_enet_start(struct bna_tx *tx) in bna_bfi_tx_enet_start() argument
3156 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; in bna_bfi_tx_enet_start()
3161 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, in bna_bfi_tx_enet_start()
3162 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); in bna_bfi_tx_enet_start()
3163 cfg_req->mh.num_entries = htons( in bna_bfi_tx_enet_start()
3164 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); in bna_bfi_tx_enet_start()
3166 cfg_req->num_queues = tx->num_txq; in bna_bfi_tx_enet_start()
3167 for (i = 0, qe = bfa_q_first(&tx->txq_q); in bna_bfi_tx_enet_start()
3168 i < tx->num_txq; in bna_bfi_tx_enet_start()
3172 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); in bna_bfi_tx_enet_start()
3173 cfg_req->q_cfg[i].q.priority = txq->priority; in bna_bfi_tx_enet_start()
3175 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = in bna_bfi_tx_enet_start()
3176 txq->ib.ib_seg_host_addr.lsb; in bna_bfi_tx_enet_start()
3177 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = in bna_bfi_tx_enet_start()
3178 txq->ib.ib_seg_host_addr.msb; in bna_bfi_tx_enet_start()
3179 cfg_req->q_cfg[i].ib.intr.msix_index = in bna_bfi_tx_enet_start()
3180 htons((u16)txq->ib.intr_vector); in bna_bfi_tx_enet_start()
3183 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; in bna_bfi_tx_enet_start()
3184 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; in bna_bfi_tx_enet_start()
3185 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; in bna_bfi_tx_enet_start()
3186 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; in bna_bfi_tx_enet_start()
3187 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) in bna_bfi_tx_enet_start()
3188 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; in bna_bfi_tx_enet_start()
3189 cfg_req->ib_cfg.coalescing_timeout = in bna_bfi_tx_enet_start()
3190 htonl((u32)txq->ib.coalescing_timeo); in bna_bfi_tx_enet_start()
3191 cfg_req->ib_cfg.inter_pkt_timeout = in bna_bfi_tx_enet_start()
3192 htonl((u32)txq->ib.interpkt_timeo); in bna_bfi_tx_enet_start()
3193 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; in bna_bfi_tx_enet_start()
3195 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; in bna_bfi_tx_enet_start()
3196 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); in bna_bfi_tx_enet_start()
3197 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED; in bna_bfi_tx_enet_start()
3198 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; in bna_bfi_tx_enet_start()
3200 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, in bna_bfi_tx_enet_start()
3201 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); in bna_bfi_tx_enet_start()
3202 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); in bna_bfi_tx_enet_start()
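
/*
 * Sketch of the num_entries computation used by every request above:
 * the mailbox carries commands in fixed-size entries, so the header
 * records the payload length rounded up to whole entries (byte-swapped
 * with htons() on the wire).  The entry size below is an assumption
 * for illustration, as is the helper name.
 */
enum { EX_MSGQ_ENTRY_SIZE = 64 };

static unsigned short ex_num_cmd_entries(unsigned int payload_len)
{
	return (unsigned short)((payload_len + EX_MSGQ_ENTRY_SIZE - 1) /
				EX_MSGQ_ENTRY_SIZE);
}
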
3206 bna_bfi_tx_enet_stop(struct bna_tx *tx) in bna_bfi_tx_enet_stop() argument
3208 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; in bna_bfi_tx_enet_stop()
3210 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, in bna_bfi_tx_enet_stop()
3211 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); in bna_bfi_tx_enet_stop()
3212 req->mh.num_entries = htons( in bna_bfi_tx_enet_stop()
3213 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); in bna_bfi_tx_enet_stop()
3214 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), in bna_bfi_tx_enet_stop()
3215 &req->mh); in bna_bfi_tx_enet_stop()
3216 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); in bna_bfi_tx_enet_stop()
3220 bna_tx_enet_stop(struct bna_tx *tx) in bna_tx_enet_stop() argument
3226 list_for_each(qe, &tx->txq_q) { in bna_tx_enet_stop()
3228 bna_ib_stop(tx->bna, &txq->ib); in bna_tx_enet_stop()
3231 bna_bfi_tx_enet_stop(tx); in bna_tx_enet_stop()
3242 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; in bna_txq_qpt_setup()
3243 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; in bna_txq_qpt_setup()
3244 txq->qpt.kv_qpt_ptr = qpt_mem->kva; in bna_txq_qpt_setup()
3245 txq->qpt.page_count = page_count; in bna_txq_qpt_setup()
3246 txq->qpt.page_size = page_size; in bna_txq_qpt_setup()
3248 txq->tcb->sw_qpt = (void **) swqpt_mem->kva; in bna_txq_qpt_setup()
3251 txq->tcb->sw_qpt[i] = page_mem[i].kva; in bna_txq_qpt_setup()
3253 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = in bna_txq_qpt_setup()
3254 page_mem[i].dma.lsb; in bna_txq_qpt_setup()
3255 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = in bna_txq_qpt_setup()
3256 page_mem[i].dma.msb; in bna_txq_qpt_setup()
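
/*
 * Sketch of the queue page table (QPT) that bna_txq_qpt_setup() fills:
 * the device walks an array of DMA addresses (lsb/msb pairs), while
 * the driver keeps a parallel array of kernel virtual pointers to the
 * same pages.  Types here are simplified stand-ins.
 */
struct ex_dma_addr {
	unsigned int lsb;
	unsigned int msb;
};

static void ex_qpt_fill(struct ex_dma_addr *hw_qpt, void **sw_qpt,
			void * const page_kva[],
			const unsigned long long page_dma[], int page_count)
{
	int i;

	for (i = 0; i < page_count; i++) {
		sw_qpt[i] = page_kva[i];			/* CPU view */
		hw_qpt[i].lsb = (unsigned int)page_dma[i];	/* device view */
		hw_qpt[i].msb = (unsigned int)(page_dma[i] >> 32);
	}
}
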
3264 struct bna_tx *tx = NULL; in bna_tx_get() local
3266 if (list_empty(&tx_mod->tx_free_q)) in bna_tx_get()
3267 return NULL; in bna_tx_get()
3268 if (type == BNA_TX_T_REGULAR) in bna_tx_get()
3269 bfa_q_deq(&tx_mod->tx_free_q, &qe); in bna_tx_get()
3270 else in bna_tx_get()
3271 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe); in bna_tx_get()
3273 tx = (struct bna_tx *)qe; in bna_tx_get()
3274 bfa_q_qe_init(&tx->qe); in bna_tx_get()
3275 tx->type = type; in bna_tx_get()
3277 return tx; in bna_tx_get()
3281 bna_tx_free(struct bna_tx *tx) in bna_tx_free() argument
3283 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; in bna_tx_free()
3288 while (!list_empty(&tx->txq_q)) { in bna_tx_free()
3289 bfa_q_deq(&tx->txq_q, &txq); in bna_tx_free()
3290 bfa_q_qe_init(&txq->qe); in bna_tx_free()
3291 txq->tcb = NULL; in bna_tx_free()
3292 txq->tx = NULL; in bna_tx_free()
3293 list_add_tail(&txq->qe, &tx_mod->txq_free_q); in bna_tx_free()
3296 list_for_each(qe, &tx_mod->tx_active_q) { in bna_tx_free()
3297 if (qe == &tx->qe) { in bna_tx_free()
3298 list_del(&tx->qe); in bna_tx_free()
3299 bfa_q_qe_init(&tx->qe); in bna_tx_free()
3304 tx->bna = NULL; in bna_tx_free()
3305 tx->priv = NULL; in bna_tx_free()
3308 list_for_each(qe, &tx_mod->tx_free_q) { in bna_tx_free()
3309 if (((struct bna_tx *)qe)->rid < tx->rid) in bna_tx_free()
3318 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe); in bna_tx_free()
3319 } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) { in bna_tx_free()
3321 list_add_tail(&tx->qe, &tx_mod->tx_free_q); in bna_tx_free()
3324 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe); in bna_tx_free()
3325 bfa_q_prev(&tx->qe) = prev_qe; in bna_tx_free()
3326 bfa_q_next(prev_qe) = &tx->qe; in bna_tx_free()
3327 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe; in bna_tx_free()
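
/*
 * Sketch of the ordered re-insertion in bna_tx_free() above: the free
 * list stays sorted by rid, so bna_tx_get() can dequeue from the head
 * for BNA_TX_T_REGULAR and from the tail otherwise.  A minimal version
 * over a bare singly linked list (illustrative types):
 */
struct ex_tx_node {
	int rid;
	struct ex_tx_node *next;
};

static void ex_free_insert_sorted(struct ex_tx_node **head,
				  struct ex_tx_node *node)
{
	struct ex_tx_node **pp = head;

	while (*pp && (*pp)->rid < node->rid)
		pp = &(*pp)->next;	/* walk to the first larger rid */
	node->next = *pp;
	*pp = node;
}
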
3332 bna_tx_start(struct bna_tx *tx) in bna_tx_start() argument
3334 tx->flags |= BNA_TX_F_ENET_STARTED; in bna_tx_start()
3335 if (tx->flags & BNA_TX_F_ENABLED) in bna_tx_start()
3336 bfa_fsm_send_event(tx, TX_E_START); in bna_tx_start()
3340 bna_tx_stop(struct bna_tx *tx) in bna_tx_stop() argument
3342 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; in bna_tx_stop()
3343 tx->stop_cbarg = &tx->bna->tx_mod; in bna_tx_stop()
3345 tx->flags &= ~BNA_TX_F_ENET_STARTED; in bna_tx_stop()
3346 bfa_fsm_send_event(tx, TX_E_STOP); in bna_tx_stop()
3350 bna_tx_fail(struct bna_tx *tx) in bna_tx_fail() argument
3352 tx->flags &= ~BNA_TX_F_ENET_STARTED; in bna_tx_fail()
3353 bfa_fsm_send_event(tx, TX_E_FAIL); in bna_tx_fail()
3357 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) in bna_bfi_tx_enet_start_rsp() argument
3359 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; in bna_bfi_tx_enet_start_rsp()
3364 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, in bna_bfi_tx_enet_start_rsp()
3367 tx->hw_id = cfg_rsp->hw_id; in bna_bfi_tx_enet_start_rsp()
3369 for (i = 0, qe = bfa_q_first(&tx->txq_q); in bna_bfi_tx_enet_start_rsp()
3370 i < tx->num_txq; i++, qe = bfa_q_next(qe)) { in bna_bfi_tx_enet_start_rsp()
3374 txq->tcb->i_dbell->doorbell_addr = in bna_bfi_tx_enet_start_rsp()
3375 tx->bna->pcidev.pci_bar_kva in bna_bfi_tx_enet_start_rsp()
3376 + ntohl(cfg_rsp->q_handles[i].i_dbell); in bna_bfi_tx_enet_start_rsp()
3377 txq->tcb->q_dbell = in bna_bfi_tx_enet_start_rsp()
3378 tx->bna->pcidev.pci_bar_kva in bna_bfi_tx_enet_start_rsp()
3379 + ntohl(cfg_rsp->q_handles[i].q_dbell); in bna_bfi_tx_enet_start_rsp()
3380 txq->hw_id = cfg_rsp->q_handles[i].hw_qid; in bna_bfi_tx_enet_start_rsp()
3383 (*txq->tcb->hw_consumer_index) = 0; in bna_bfi_tx_enet_start_rsp()
3384 txq->tcb->producer_index = txq->tcb->consumer_index = 0; in bna_bfi_tx_enet_start_rsp()
3387 bfa_fsm_send_event(tx, TX_E_STARTED); in bna_bfi_tx_enet_start_rsp()
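
/*
 * Sketch of the doorbell mapping in the response handler above: the
 * firmware returns per-queue doorbell offsets in big-endian byte
 * order, and the driver adds them to the mapped PCI BAR to obtain
 * CPU-usable pointers.  ex_ntohl() is a portable stand-in for ntohl().
 */
static unsigned int ex_ntohl(unsigned int be)
{
	const unsigned char *b = (const unsigned char *)&be;

	return ((unsigned int)b[0] << 24) | ((unsigned int)b[1] << 16) |
	       ((unsigned int)b[2] << 8) | (unsigned int)b[3];
}

static void *ex_doorbell_ptr(char *pci_bar_kva, unsigned int be32_offset)
{
	return pci_bar_kva + ex_ntohl(be32_offset);
}
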
3391 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) in bna_bfi_tx_enet_stop_rsp() argument
3393 bfa_fsm_send_event(tx, TX_E_STOPPED); in bna_bfi_tx_enet_stop_rsp()
3399 struct bna_tx *tx; in bna_bfi_bw_update_aen() local
3402 list_for_each(qe, &tx_mod->tx_active_q) { in bna_bfi_bw_update_aen()
3403 tx = (struct bna_tx *)qe; in bna_bfi_bw_update_aen()
3404 bfa_fsm_send_event(tx, TX_E_BW_UPDATE); in bna_bfi_bw_update_aen()
3417 mem_info->mem_type = BNA_MEM_T_KVA; in bna_tx_res_req()
3418 mem_info->len = sizeof(struct bna_tcb); in bna_tx_res_req()
3419 mem_info->num = num_txq; in bna_tx_res_req()
3427 mem_info->mem_type = BNA_MEM_T_DMA; in bna_tx_res_req()
3428 mem_info->len = page_count * sizeof(struct bna_dma_addr); in bna_tx_res_req()
3429 mem_info->num = num_txq; in bna_tx_res_req()
3433 mem_info->mem_type = BNA_MEM_T_KVA; in bna_tx_res_req()
3434 mem_info->len = page_count * sizeof(void *); in bna_tx_res_req()
3435 mem_info->num = num_txq; in bna_tx_res_req()
3439 mem_info->mem_type = BNA_MEM_T_DMA; in bna_tx_res_req()
3440 mem_info->len = PAGE_SIZE; in bna_tx_res_req()
3441 mem_info->num = num_txq * page_count; in bna_tx_res_req()
3445 mem_info->mem_type = BNA_MEM_T_DMA; in bna_tx_res_req()
3446 mem_info->len = BFI_IBIDX_SIZE; in bna_tx_res_req()
3447 mem_info->num = num_txq; in bna_tx_res_req()
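
/*
 * Rough per-TxQ memory budget implied by bna_tx_res_req() above.  The
 * derivation of page_count from txq_depth is elided in this listing,
 * so it is taken as an input here; sizes are parameters rather than
 * the real BFI constants, and the helper name is hypothetical.
 */
static unsigned long ex_txq_mem_bytes(unsigned long page_count,
				      unsigned long page_size,
				      unsigned long tcb_size,
				      unsigned long ibidx_size)
{
	return tcb_size				/* TCB (kernel memory) */
	       + page_count * 8			/* HW QPT: lsb/msb DMA pairs */
	       + page_count * sizeof(void *)	/* SW QPT: page KVAs */
	       + page_count * page_size		/* the TxQ pages themselves */
	       + ibidx_size;			/* IB index segment (DMA) */
}
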
3462 struct bna_tx_mod *tx_mod = &bna->tx_mod; in bna_tx_create()
3463 struct bna_tx *tx; in bna_tx_create() local
3473 tx_cfg->num_txq; in bna_tx_create()
3480 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) in bna_tx_create()
3483 /* Tx */ in bna_tx_create()
3485 tx = bna_tx_get(tx_mod, tx_cfg->tx_type); in bna_tx_create()
3486 if (!tx) in bna_tx_create()
3488 tx->bna = bna; in bna_tx_create()
3489 tx->priv = priv; in bna_tx_create()
3493 INIT_LIST_HEAD(&tx->txq_q); in bna_tx_create()
3494 for (i = 0; i < tx_cfg->num_txq; i++) { in bna_tx_create()
3495 if (list_empty(&tx_mod->txq_free_q)) in bna_tx_create()
3498 bfa_q_deq(&tx_mod->txq_free_q, &txq); in bna_tx_create()
3499 bfa_q_qe_init(&txq->qe); in bna_tx_create()
3500 list_add_tail(&txq->qe, &tx->txq_q); in bna_tx_create()
3501 txq->tx = tx; in bna_tx_create()
3508 /* Tx */ in bna_tx_create()
3510 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; in bna_tx_create()
3511 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; in bna_tx_create()
3513 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; in bna_tx_create()
3514 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; in bna_tx_create()
3515 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; in bna_tx_create()
3517 list_add_tail(&tx->qe, &tx_mod->tx_active_q); in bna_tx_create()
3519 tx->num_txq = tx_cfg->num_txq; in bna_tx_create()
3521 tx->flags = 0; in bna_tx_create()
3522 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { in bna_tx_create()
3523 switch (tx->type) { in bna_tx_create()
3525 if (!(tx->bna->tx_mod.flags & in bna_tx_create()
3527 tx->flags |= BNA_TX_F_ENET_STARTED; in bna_tx_create()
3530 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) in bna_tx_create()
3531 tx->flags |= BNA_TX_F_ENET_STARTED; in bna_tx_create()
3540 list_for_each(qe, &tx->txq_q) { in bna_tx_create()
3542 txq->tcb = (struct bna_tcb *) in bna_tx_create()
3544 txq->tx_packets = 0; in bna_tx_create()
3545 txq->tx_bytes = 0; in bna_tx_create()
3548 txq->ib.ib_seg_host_addr.lsb = in bna_tx_create()
3550 txq->ib.ib_seg_host_addr.msb = in bna_tx_create()
3552 txq->ib.ib_seg_host_addr_kva = in bna_tx_create()
3554 txq->ib.intr_type = intr_info->intr_type; in bna_tx_create()
3555 txq->ib.intr_vector = (intr_info->num == 1) ? in bna_tx_create()
3556 intr_info->idl[0].vector : in bna_tx_create()
3557 intr_info->idl[i].vector; in bna_tx_create()
3558 if (intr_info->intr_type == BNA_INTR_T_INTX) in bna_tx_create()
3559 txq->ib.intr_vector = (1 << txq->ib.intr_vector); in bna_tx_create()
3560 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; in bna_tx_create()
3561 txq->ib.interpkt_timeo = 0; /* Not used */ in bna_tx_create()
3562 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; in bna_tx_create()
3566 txq->tcb->q_depth = tx_cfg->txq_depth; in bna_tx_create()
3567 txq->tcb->unmap_q = (void *) in bna_tx_create()
3569 txq->tcb->hw_consumer_index = in bna_tx_create()
3570 (u32 *)txq->ib.ib_seg_host_addr_kva; in bna_tx_create()
3571 txq->tcb->i_dbell = &txq->ib.door_bell; in bna_tx_create()
3572 txq->tcb->intr_type = txq->ib.intr_type; in bna_tx_create()
3573 txq->tcb->intr_vector = txq->ib.intr_vector; in bna_tx_create()
3574 txq->tcb->txq = txq; in bna_tx_create()
3575 txq->tcb->bnad = bnad; in bna_tx_create()
3576 txq->tcb->id = i; in bna_tx_create()
3584 txq->tcb->page_idx = page_idx; in bna_tx_create()
3585 txq->tcb->page_count = page_count; in bna_tx_create()
3589 if (tx->tcb_setup_cbfn) in bna_tx_create()
3590 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); in bna_tx_create()
3592 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) in bna_tx_create()
3593 txq->priority = txq->tcb->id; in bna_tx_create()
3595 txq->priority = tx_mod->default_prio; in bna_tx_create()
3600 tx->txf_vlan_id = 0; in bna_tx_create()
3602 bfa_fsm_set_state(tx, bna_tx_sm_stopped); in bna_tx_create()
3604 tx_mod->rid_mask |= (1 << tx->rid); in bna_tx_create()
3606 return tx; in bna_tx_create()
3609 bna_tx_free(tx); in bna_tx_create()
3614 bna_tx_destroy(struct bna_tx *tx) in bna_tx_destroy() argument
3619 list_for_each(qe, &tx->txq_q) { in bna_tx_destroy()
3621 if (tx->tcb_destroy_cbfn) in bna_tx_destroy()
3622 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); in bna_tx_destroy()
3625 tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid); in bna_tx_destroy()
3626 bna_tx_free(tx); in bna_tx_destroy()
3630 bna_tx_enable(struct bna_tx *tx) in bna_tx_enable() argument
3632 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) in bna_tx_enable()
3633 return; in bna_tx_enable()
3635 tx->flags |= BNA_TX_F_ENABLED; in bna_tx_enable()
3637 if (tx->flags & BNA_TX_F_ENET_STARTED) in bna_tx_enable()
3638 bfa_fsm_send_event(tx, TX_E_START); in bna_tx_enable()
3642 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, in bna_tx_disable() argument
3646 (*cbfn)(tx->bna->bnad, tx); in bna_tx_disable()
3650 tx->stop_cbfn = cbfn; in bna_tx_disable()
3651 tx->stop_cbarg = tx->bna->bnad; in bna_tx_disable()
3653 tx->flags &= ~BNA_TX_F_ENABLED; in bna_tx_disable()
3655 bfa_fsm_send_event(tx, TX_E_STOP); in bna_tx_disable()
3659 bna_tx_cleanup_complete(struct bna_tx *tx) in bna_tx_cleanup_complete() argument
3661 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); in bna_tx_cleanup_complete()
3665 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) in bna_tx_mod_cb_tx_stopped() argument
3669 bfa_wc_down(&tx_mod->tx_stop_wc); in bna_tx_mod_cb_tx_stopped()
3677 if (tx_mod->stop_cbfn) in bna_tx_mod_cb_tx_stopped_all()
3678 tx_mod->stop_cbfn(&tx_mod->bna->enet); in bna_tx_mod_cb_tx_stopped_all()
3679 tx_mod->stop_cbfn = NULL; in bna_tx_mod_cb_tx_stopped_all()
3688 tx_mod->bna = bna; in bna_tx_mod_init()
3689 tx_mod->flags = 0; in bna_tx_mod_init()
3691 tx_mod->tx = (struct bna_tx *) in bna_tx_mod_init()
3693 tx_mod->txq = (struct bna_txq *) in bna_tx_mod_init()
3696 INIT_LIST_HEAD(&tx_mod->tx_free_q); in bna_tx_mod_init()
3697 INIT_LIST_HEAD(&tx_mod->tx_active_q); in bna_tx_mod_init()
3699 INIT_LIST_HEAD(&tx_mod->txq_free_q); in bna_tx_mod_init()
3701 for (i = 0; i < bna->ioceth.attr.num_txq; i++) { in bna_tx_mod_init()
3702 tx_mod->tx[i].rid = i; in bna_tx_mod_init()
3703 bfa_q_qe_init(&tx_mod->tx[i].qe); in bna_tx_mod_init()
3704 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); in bna_tx_mod_init()
3705 bfa_q_qe_init(&tx_mod->txq[i].qe); in bna_tx_mod_init()
3706 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); in bna_tx_mod_init()
3709 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; in bna_tx_mod_init()
3710 tx_mod->default_prio = 0; in bna_tx_mod_init()
3711 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; in bna_tx_mod_init()
3712 tx_mod->iscsi_prio = -1; in bna_tx_mod_init()
3722 list_for_each(qe, &tx_mod->tx_free_q) in bna_tx_mod_uninit()
3726 list_for_each(qe, &tx_mod->txq_free_q) in bna_tx_mod_uninit()
3729 tx_mod->bna = NULL; in bna_tx_mod_uninit()
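
/*
 * Sketch of the pool setup in bna_tx_mod_init() above: a flat array of
 * objects is carved from preallocated memory and every element is
 * threaded onto a free list in rid order, so later create/destroy
 * cycles never allocate.  Stand-in types, for illustration only.
 */
struct ex_pool_obj {
	int rid;
	struct ex_pool_obj *next;
};

static struct ex_pool_obj *ex_pool_init(struct ex_pool_obj *arr, int n)
{
	struct ex_pool_obj *head = NULL;
	int i;

	for (i = n - 1; i >= 0; i--) {	/* head ends up as rid 0 */
		arr[i].rid = i;
		arr[i].next = head;
		head = &arr[i];
	}
	return head;
}
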
3735 struct bna_tx *tx; in bna_tx_mod_start() local
3738 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; in bna_tx_mod_start()
3740 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; in bna_tx_mod_start()
3742 list_for_each(qe, &tx_mod->tx_active_q) { in bna_tx_mod_start()
3743 tx = (struct bna_tx *)qe; in bna_tx_mod_start()
3744 if (tx->type == type) in bna_tx_mod_start()
3745 bna_tx_start(tx); in bna_tx_mod_start()
3752 struct bna_tx *tx; in bna_tx_mod_stop() local
3755 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; in bna_tx_mod_stop()
3756 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; in bna_tx_mod_stop()
3758 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; in bna_tx_mod_stop()
3760 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); in bna_tx_mod_stop()
3762 list_for_each(qe, &tx_mod->tx_active_q) { in bna_tx_mod_stop()
3763 tx = (struct bna_tx *)qe; in bna_tx_mod_stop()
3764 if (tx->type == type) { in bna_tx_mod_stop()
3765 bfa_wc_up(&tx_mod->tx_stop_wc); in bna_tx_mod_stop()
3766 bna_tx_stop(tx); in bna_tx_mod_stop()
3770 bfa_wc_wait(&tx_mod->tx_stop_wc); in bna_tx_mod_stop()
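
/*
 * Sketch of the bfa_wc wait-counter protocol used by bna_tx_mod_stop()
 * above, inferred from usage: init biases the count by one, each
 * pending stop does wc_up(), each completion wc_down(), and wc_wait()
 * drops the bias so the resume callback fires exactly once -
 * immediately if nothing was pending.
 */
struct ex_wc {
	void (*resume)(void *cbarg);
	void *cbarg;
	int count;
};

static void ex_wc_down(struct ex_wc *wc)
{
	if (--wc->count == 0)
		wc->resume(wc->cbarg);
}

static void ex_wc_init(struct ex_wc *wc, void (*resume)(void *), void *cbarg)
{
	wc->resume = resume;
	wc->cbarg = cbarg;
	wc->count = 1;		/* bias, dropped by ex_wc_wait() */
}

static void ex_wc_up(struct ex_wc *wc)
{
	wc->count++;
}

static void ex_wc_wait(struct ex_wc *wc)
{
	ex_wc_down(wc);		/* fires resume if no stops are pending */
}
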
3776 struct bna_tx *tx; in bna_tx_mod_fail() local
3779 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; in bna_tx_mod_fail()
3780 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; in bna_tx_mod_fail()
3782 list_for_each(qe, &tx_mod->tx_active_q) { in bna_tx_mod_fail()
3783 tx = (struct bna_tx *)qe; in bna_tx_mod_fail()
3784 bna_tx_fail(tx); in bna_tx_mod_fail()
3789 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) in bna_tx_coalescing_timeo_set() argument
3794 list_for_each(qe, &tx->txq_q) { in bna_tx_coalescing_timeo_set()
3796 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); in bna_tx_coalescing_timeo_set()