Lines matching defs:rxf

Each match below is prefixed by its line number in the source file; only lines that mention rxf are shown, so macro and function bodies appear fragmentarily.
25 #define bna_rxf_vlan_cfg_soft_reset(rxf) \
27 (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
28 (rxf)->vlan_strip_pending = true; \
31 #define bna_rxf_rss_cfg_soft_reset(rxf) \
33 if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
34 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
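
The matches show only the rxf-bearing lines of these two macros, so their wrappers are elided. A plausible reconstruction, assuming the usual do { } while (0) statement-macro idiom and the three RSS pending flags that reappear at lines 498-510:

    #define bna_rxf_vlan_cfg_soft_reset(rxf)                        \
    do {                                                            \
        (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;       \
        (rxf)->vlan_strip_pending = true;                           \
    } while (0)

    #define bna_rxf_rss_cfg_soft_reset(rxf)                         \
    do {                                                            \
        if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)              \
            (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |           \
                                  BNA_RSS_F_CFG_PENDING |           \
                                  BNA_RSS_F_STATUS_PENDING);        \
    } while (0)
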
39 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
40 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
41 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
42 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
43 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
44 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
45 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
47 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
49 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
62 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
64 call_rxf_stop_cbfn(rxf);
68 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
72 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
76 call_rxf_stop_cbfn(rxf);
84 call_rxf_cam_fltr_cbfn(rxf);
93 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
95 if (!bna_rxf_cfg_apply(rxf)) {
97 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
102 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
106 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
110 bna_rxf_cfg_reset(rxf);
111 call_rxf_start_cbfn(rxf);
112 call_rxf_cam_fltr_cbfn(rxf);
113 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
121 if (!bna_rxf_cfg_apply(rxf)) {
123 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
133 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
135 call_rxf_start_cbfn(rxf);
136 call_rxf_cam_fltr_cbfn(rxf);
140 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
145 bna_rxf_cfg_reset(rxf);
146 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
150 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
159 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
164 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
169 bna_rxf_cfg_reset(rxf);
170 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
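
The bna_rxf_sm_* functions above form a handler-pointer state machine: bfa_fsm_set_state() apparently stores the new state's handler and runs its *_entry hook, while bfa_fsm_send_event() dispatches an event to whatever handler is current. A minimal self-contained sketch of that idiom (generic names, not the driver's actual bfa_fsm_* definitions):

    /* Sketch of the handler-pointer FSM idiom; illustrative only. */
    struct fsm;
    typedef void (*fsm_handler_t)(struct fsm *f, int event);

    struct fsm {
        fsm_handler_t state;    /* current state == current handler */
    };

    /* Enter a state: remember its handler, then run its entry hook. */
    #define fsm_set_state(f, s)         \
    do {                                \
        (f)->state = (s);               \
        s##_entry(f);                   \
    } while (0)

    /* Deliver an event to the current state's handler. */
    #define fsm_send_event(f, ev)   ((f)->state((f), (ev)))
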
179 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
182 struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
184 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
188 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
190 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
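
Every bna_bfi_* helper in this block follows the same three steps: build a request inside rxf->bfi_enet_cmd, bind rxf->msgq_cmd to that buffer with bfa_msgq_cmd_set(), and post it with bfa_msgq_cmd_post(); the RXF then waits in cfg_wait for the firmware reply (RXF_E_FW_RESP). For bna_bfi_ucast_req(), the elided lines 180 and 185-187 presumably declare the request-type parameter, set the entry count, and copy the address; a hedged reconstruction:

    static void
    bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
                enum bfi_enet_h2i_msgs req_type)
    {
        struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

        /* 1. Header: message class, type, LPU 0, RX function id. */
        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
            bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
        ether_addr_copy(req->mac_addr, mac->addr);
        /* 2. Bind the msgq command to the buffer, 3. post it. */
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
            sizeof(struct bfi_enet_ucast_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
    }
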
194 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
197 &rxf->bfi_enet_cmd.mcast_add_req;
200 0, rxf->rx->rid);
204 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
206 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
210 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
213 &rxf->bfi_enet_cmd.mcast_del_req;
216 0, rxf->rx->rid);
220 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
222 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
226 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
228 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
231 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
235 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
237 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
241 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
243 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
246 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
250 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
252 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
256 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
258 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
263 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
269 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
271 htonl(rxf->vlan_filter_table[j]);
275 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
277 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
281 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
283 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
286 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
289 req->enable = rxf->vlan_strip_status;
290 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
292 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
296 bna_bfi_rit_cfg(struct bna_rxf *rxf)
298 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
301 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
304 req->size = htons(rxf->rit_size);
305 memcpy(&req->table[0], rxf->rit, rxf->rit_size);
306 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
308 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
312 bna_bfi_rss_cfg(struct bna_rxf *rxf)
314 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
318 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
321 req->cfg.type = rxf->rss_cfg.hash_type;
322 req->cfg.mask = rxf->rss_cfg.hash_mask;
325 htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
326 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
328 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
332 bna_bfi_rss_enable(struct bna_rxf *rxf)
334 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
337 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
340 req->enable = rxf->rss_status;
341 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
343 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
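
Four of these helpers (multicast filter, promiscuous, VLAN strip, RSS enable) reuse one generic on/off request, differing only in the H2I message type and the status value written to req->enable. The shared request plausibly looks like:

    /* Assumed shape of the shared enable/disable request. */
    struct bfi_enet_enable_req {
        struct bfi_msgq_mhdr mh;    /* common message header */
        u8 enable;                  /* BNA_STATUS_T_ENABLED / _DISABLED */
        u8 rsvd[3];                 /* pad to a 32-bit boundary */
    };
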
348 bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr)
352 list_for_each_entry(mac, &rxf->mcast_active_q, qe)
356 list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
364 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
368 list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
376 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
381 mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
382 mchandle = bna_rxf_mchandle_get(rxf, handle);
384 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
387 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
394 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
407 bna_bfi_mcast_del_req(rxf, mchandle->handle);
411 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
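
Only the firmware call and the handle release of bna_rxf_mcast_del() match here. The surrounding logic presumably reference-counts the multicast CAM handle (duplicate addresses share one handle via bna_rxf_mchandle_attach() above), posting the firmware delete only when the last reference drops and only under hard cleanup. A hedged reconstruction:

    static int
    bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
                enum bna_cleanup_type cleanup)
    {
        struct bna_mcam_handle *mchandle = mac->handle;
        int ret = 0;

        if (mchandle == NULL)
            return ret;

        mchandle->refcnt--;
        if (mchandle->refcnt == 0) {
            /* Last user: tell firmware only on hard cleanup; soft
             * cleanup assumes the IOC is being reset anyway. */
            if (cleanup == BNA_HARD_CLEANUP) {
                bna_bfi_mcast_del_req(rxf, mchandle->handle);
                ret = 1;    /* a command is now in flight */
            }
            list_del(&mchandle->qe);
            bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
        }
        mac->handle = NULL;
        return ret;
    }
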
419 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
425 while (!list_empty(&rxf->mcast_pending_del_q)) {
426 mac = list_first_entry(&rxf->mcast_pending_del_q,
428 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
429 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
435 if (!list_empty(&rxf->mcast_pending_add_q)) {
436 mac = list_first_entry(&rxf->mcast_pending_add_q,
438 list_move_tail(&mac->qe, &rxf->mcast_active_q);
439 bna_bfi_mcast_add_req(rxf, mac);
447 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
452 if (rxf->vlan_pending_bitmask) {
453 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
458 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
459 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
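
bna_rxf_vlan_cfg_apply() flushes one dirty 512-VLAN block per invocation. The elided scan between lines 453 and 458 presumably walks vlan_pending_bitmask to its lowest set bit, roughly:

    if (rxf->vlan_pending_bitmask) {
        u8 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
        int block_idx = 0;

        /* Find the lowest-numbered block still marked dirty. */
        while (!(vlan_pending_bitmask & 0x1)) {
            block_idx++;
            vlan_pending_bitmask >>= 1;
        }
        rxf->vlan_pending_bitmask &= ~BIT(block_idx);
        bna_bfi_rx_vlan_filter_set(rxf, block_idx);
        return 1;   /* one command posted; re-enter on FW_RESP */
    }
    return 0;
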
467 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
473 while (!list_empty(&rxf->mcast_pending_del_q)) {
474 mac = list_first_entry(&rxf->mcast_pending_del_q,
476 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
477 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
483 while (!list_empty(&rxf->mcast_active_q)) {
484 mac = list_first_entry(&rxf->mcast_active_q,
486 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
487 if (bna_rxf_mcast_del(rxf, mac, cleanup))
495 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
497 if (rxf->rss_pending) {
498 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
499 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
500 bna_bfi_rit_cfg(rxf);
504 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
505 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
506 bna_bfi_rss_cfg(rxf);
510 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
511 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
512 bna_bfi_rss_enable(rxf);
521 bna_rxf_cfg_apply(struct bna_rxf *rxf)
523 if (bna_rxf_ucast_cfg_apply(rxf))
526 if (bna_rxf_mcast_cfg_apply(rxf))
529 if (bna_rxf_promisc_cfg_apply(rxf))
532 if (bna_rxf_allmulti_cfg_apply(rxf))
535 if (bna_rxf_vlan_cfg_apply(rxf))
538 if (bna_rxf_vlan_strip_cfg_apply(rxf))
541 if (bna_rxf_rss_cfg_apply(rxf))
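
The calls above chain with early returns: judging by the "if (!bna_rxf_cfg_apply(rxf))" test at line 95, each *_cfg_apply() returns nonzero once it has posted a firmware command, so at most one request is outstanding and the cfg_wait state re-runs the chain on every RXF_E_FW_RESP until all helpers return 0. Reconstructed:

    static int
    bna_rxf_cfg_apply(struct bna_rxf *rxf)
    {
        /* Order matters: unicast first, RSS last.  Each helper
         * returns nonzero right after posting one command. */
        if (bna_rxf_ucast_cfg_apply(rxf))
            return 1;
        if (bna_rxf_mcast_cfg_apply(rxf))
            return 1;
        if (bna_rxf_promisc_cfg_apply(rxf))
            return 1;
        if (bna_rxf_allmulti_cfg_apply(rxf))
            return 1;
        if (bna_rxf_vlan_cfg_apply(rxf))
            return 1;
        if (bna_rxf_vlan_strip_cfg_apply(rxf))
            return 1;
        if (bna_rxf_rss_cfg_apply(rxf))
            return 1;
        return 0;   /* nothing pending: cfg_wait -> started */
    }
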
548 bna_rxf_cfg_reset(struct bna_rxf *rxf)
550 bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
551 bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
552 bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
553 bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
554 bna_rxf_vlan_cfg_soft_reset(rxf);
555 bna_rxf_rss_cfg_soft_reset(rxf);
559 bna_rit_init(struct bna_rxf *rxf, int rit_size)
561 struct bna_rx *rx = rxf->rx;
565 rxf->rit_size = rit_size;
567 rxf->rit[offset] = rxp->cq.ccb->id;
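
bna_rit_init() fills the RSS indirection table with the CQ ids of the rx's datapaths; the elided iteration is presumably a walk of the path list, along these lines:

    static void
    bna_rit_init(struct bna_rxf *rxf, int rit_size)
    {
        struct bna_rx *rx = rxf->rx;
        struct bna_rxp *rxp;
        int offset = 0;

        rxf->rit_size = rit_size;
        /* One RIT slot per RX path, pointing at that path's CQ. */
        list_for_each_entry(rxp, &rx->rxp_q, qe) {
            rxf->rit[offset] = rxp->cq.ccb->id;
            offset++;
        }
    }
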
573 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
575 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
579 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
587 rxf->ucast_active_set = 0;
590 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
594 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
598 &rxf->bfi_enet_cmd.mcast_add_req;
602 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
604 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
608 bna_rxf_init(struct bna_rxf *rxf,
613 rxf->rx = rx;
615 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
616 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
617 rxf->ucast_pending_set = 0;
618 rxf->ucast_active_set = 0;
619 INIT_LIST_HEAD(&rxf->ucast_active_q);
620 rxf->ucast_pending_mac = NULL;
622 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
623 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
624 INIT_LIST_HEAD(&rxf->mcast_active_q);
625 INIT_LIST_HEAD(&rxf->mcast_handle_q);
627 rxf->rit = (u8 *)
629 bna_rit_init(rxf, q_config->num_paths);
631 rxf->rss_status = q_config->rss_status;
632 if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
633 rxf->rss_cfg = q_config->rss_config;
634 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
635 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
636 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
639 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
640 memset(rxf->vlan_filter_table, 0,
642 rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
643 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
645 rxf->vlan_strip_status = q_config->vlan_strip_status;
647 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
651 bna_rxf_uninit(struct bna_rxf *rxf)
655 rxf->ucast_pending_set = 0;
656 rxf->ucast_active_set = 0;
658 while (!list_empty(&rxf->ucast_pending_add_q)) {
659 mac = list_first_entry(&rxf->ucast_pending_add_q,
661 list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
664 if (rxf->ucast_pending_mac) {
665 list_add_tail(&rxf->ucast_pending_mac->qe,
666 bna_ucam_mod_free_q(rxf->rx->bna));
667 rxf->ucast_pending_mac = NULL;
670 while (!list_empty(&rxf->mcast_pending_add_q)) {
671 mac = list_first_entry(&rxf->mcast_pending_add_q,
673 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
676 rxf->rxmode_pending = 0;
677 rxf->rxmode_pending_bitmask = 0;
678 if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
679 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
680 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
681 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
683 rxf->rss_pending = 0;
684 rxf->vlan_strip_pending = false;
686 rxf->rx = NULL;
696 bna_rxf_start(struct bna_rxf *rxf)
698 rxf->start_cbfn = bna_rx_cb_rxf_started;
699 rxf->start_cbarg = rxf->rx;
700 bfa_fsm_send_event(rxf, RXF_E_START);
710 bna_rxf_stop(struct bna_rxf *rxf)
712 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
713 rxf->stop_cbarg = rxf->rx;
714 bfa_fsm_send_event(rxf, RXF_E_STOP);
718 bna_rxf_fail(struct bna_rxf *rxf)
720 bfa_fsm_send_event(rxf, RXF_E_FAIL);
726 struct bna_rxf *rxf = &rx->rxf;
728 if (rxf->ucast_pending_mac == NULL) {
729 rxf->ucast_pending_mac =
730 bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
731 if (rxf->ucast_pending_mac == NULL)
735 ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
736 rxf->ucast_pending_set = 1;
737 rxf->cam_fltr_cbfn = NULL;
738 rxf->cam_fltr_cbarg = rx->bna->bnad;
740 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
749 struct bna_rxf *rxf = &rx->rxf;
753 if (bna_mac_find(&rxf->mcast_active_q, addr) ||
754 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
760 mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
764 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
766 rxf->cam_fltr_cbfn = cbfn;
767 rxf->cam_fltr_cbarg = rx->bna->bnad;
769 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
778 struct bna_rxf *rxf = &rx->rxf;
785 while (!list_empty(&rxf->ucast_pending_add_q)) {
786 mac = list_first_entry(&rxf->ucast_pending_add_q,
792 while (!list_empty(&rxf->ucast_active_q)) {
793 mac = list_first_entry(&rxf->ucast_active_q,
798 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
816 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
819 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
836 struct bna_rxf *rxf = &rx->rxf;
843 while (!list_empty(&rxf->mcast_pending_add_q)) {
844 mac = list_first_entry(&rxf->mcast_pending_add_q,
850 while (!list_empty(&rxf->mcast_active_q)) {
851 mac = list_first_entry(&rxf->mcast_active_q,
856 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
876 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
879 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
895 struct bna_rxf *rxf = &rx->rxf;
900 while (!list_empty(&rxf->mcast_pending_add_q)) {
901 mac = list_first_entry(&rxf->mcast_pending_add_q,
903 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
907 while (!list_empty(&rxf->mcast_active_q)) {
908 mac = list_first_entry(&rxf->mcast_active_q,
911 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
913 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
915 list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
920 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
926 struct bna_rxf *rxf = &rx->rxf;
931 rxf->vlan_filter_table[index] |= bit;
932 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
933 rxf->vlan_pending_bitmask |= BIT(group_id);
934 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
941 struct bna_rxf *rxf = &rx->rxf;
946 rxf->vlan_filter_table[index] &= ~bit;
947 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
948 rxf->vlan_pending_bitmask |= BIT(group_id);
949 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
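
bna_rx_vlan_add()/_del() treat the VLAN filter as an array of 32-bit words while tracking dirtiness per firmware block; BFI_VLAN_BMASK_ALL being a u8 suggests 8 blocks covering the 4096 VLAN ids, i.e. 512 ids per block. The elided index math is presumably along these lines (the shift constants are assumptions):

    /* Hypothetical reconstruction of the elided computations. */
    int index = (vlan_id >> 5);            /* which u32 word */
    u32 bit = BIT(vlan_id & 0x1f);         /* which bit in that word */
    int group_id = (vlan_id >> 9);         /* which 512-id firmware block */

    rxf->vlan_filter_table[index] |= bit;  /* _del clears: &= ~bit */
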
954 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
959 if (!list_empty(&rxf->ucast_pending_del_q)) {
960 mac = list_first_entry(&rxf->ucast_pending_del_q,
962 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
963 list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
968 if (rxf->ucast_pending_set) {
969 rxf->ucast_pending_set = 0;
970 ether_addr_copy(rxf->ucast_active_mac.addr,
971 rxf->ucast_pending_mac->addr);
972 rxf->ucast_active_set = 1;
973 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
979 if (!list_empty(&rxf->ucast_pending_add_q)) {
980 mac = list_first_entry(&rxf->ucast_pending_add_q,
982 list_move_tail(&mac->qe, &rxf->ucast_active_q);
983 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
991 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
996 while (!list_empty(&rxf->ucast_pending_del_q)) {
997 mac = list_first_entry(&rxf->ucast_pending_del_q,
1001 bna_ucam_mod_del_q(rxf->rx->bna));
1003 bna_bfi_ucast_req(rxf, mac,
1006 bna_ucam_mod_del_q(rxf->rx->bna));
1012 while (!list_empty(&rxf->ucast_active_q)) {
1013 mac = list_first_entry(&rxf->ucast_active_q,
1015 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
1017 bna_bfi_ucast_req(rxf, mac,
1023 if (rxf->ucast_active_set) {
1024 rxf->ucast_pending_set = 1;
1025 rxf->ucast_active_set = 0;
1027 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
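
bna_rxf_ucast_cfg_reset() distinguishes the two cleanup flavors: soft cleanup only rewinds the bookkeeping (the IOC is going down, so firmware state dies with it), while hard cleanup also posts DEL requests and returns 1 so the FSM waits for each reply. The first loop plausibly reads:

    while (!list_empty(&rxf->ucast_pending_del_q)) {
        mac = list_first_entry(&rxf->ucast_pending_del_q,
                    struct bna_mac, qe);
        if (cleanup == BNA_SOFT_CLEANUP) {
            /* Firmware is resetting: just recycle the entry. */
            list_move_tail(&mac->qe,
                    bna_ucam_mod_del_q(rxf->rx->bna));
        } else {
            bna_bfi_ucast_req(rxf, mac,
                    BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
            list_move_tail(&mac->qe,
                    bna_ucam_mod_del_q(rxf->rx->bna));
            return 1;   /* wait for the firmware reply */
        }
    }
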
1037 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1039 struct bna *bna = rxf->rx->bna;
1042 if (is_promisc_enable(rxf->rxmode_pending,
1043 rxf->rxmode_pending_bitmask)) {
1045 promisc_inactive(rxf->rxmode_pending,
1046 rxf->rxmode_pending_bitmask);
1047 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1048 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1050 } else if (is_promisc_disable(rxf->rxmode_pending,
1051 rxf->rxmode_pending_bitmask)) {
1053 promisc_inactive(rxf->rxmode_pending,
1054 rxf->rxmode_pending_bitmask);
1055 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1057 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1065 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1067 struct bna *bna = rxf->rx->bna;
1070 if (is_promisc_disable(rxf->rxmode_pending,
1071 rxf->rxmode_pending_bitmask)) {
1072 promisc_inactive(rxf->rxmode_pending,
1073 rxf->rxmode_pending_bitmask);
1074 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1077 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1083 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1084 promisc_enable(rxf->rxmode_pending,
1085 rxf->rxmode_pending_bitmask);
1086 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1088 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1097 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1100 if (is_allmulti_enable(rxf->rxmode_pending,
1101 rxf->rxmode_pending_bitmask)) {
1103 allmulti_inactive(rxf->rxmode_pending,
1104 rxf->rxmode_pending_bitmask);
1105 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1106 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1108 } else if (is_allmulti_disable(rxf->rxmode_pending,
1109 rxf->rxmode_pending_bitmask)) {
1111 allmulti_inactive(rxf->rxmode_pending,
1112 rxf->rxmode_pending_bitmask);
1113 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1114 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1122 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1125 if (is_allmulti_disable(rxf->rxmode_pending,
1126 rxf->rxmode_pending_bitmask)) {
1127 allmulti_inactive(rxf->rxmode_pending,
1128 rxf->rxmode_pending_bitmask);
1129 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1131 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1137 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1138 allmulti_enable(rxf->rxmode_pending,
1139 rxf->rxmode_pending_bitmask);
1140 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1142 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1151 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1153 struct bna *bna = rxf->rx->bna;
1156 if (is_promisc_enable(rxf->rxmode_pending,
1157 rxf->rxmode_pending_bitmask) ||
1158 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1160 } else if (is_promisc_disable(rxf->rxmode_pending,
1161 rxf->rxmode_pending_bitmask)) {
1163 promisc_inactive(rxf->rxmode_pending,
1164 rxf->rxmode_pending_bitmask);
1167 promisc_enable(rxf->rxmode_pending,
1168 rxf->rxmode_pending_bitmask);
1169 bna->promisc_rid = rxf->rx->rid;
1177 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1179 struct bna *bna = rxf->rx->bna;
1182 if (is_promisc_disable(rxf->rxmode_pending,
1183 rxf->rxmode_pending_bitmask) ||
1184 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1186 } else if (is_promisc_enable(rxf->rxmode_pending,
1187 rxf->rxmode_pending_bitmask)) {
1189 promisc_inactive(rxf->rxmode_pending,
1190 rxf->rxmode_pending_bitmask);
1192 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1194 promisc_disable(rxf->rxmode_pending,
1195 rxf->rxmode_pending_bitmask);
1203 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1207 if (is_allmulti_enable(rxf->rxmode_pending,
1208 rxf->rxmode_pending_bitmask) ||
1209 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1211 } else if (is_allmulti_disable(rxf->rxmode_pending,
1212 rxf->rxmode_pending_bitmask)) {
1214 allmulti_inactive(rxf->rxmode_pending,
1215 rxf->rxmode_pending_bitmask);
1218 allmulti_enable(rxf->rxmode_pending,
1219 rxf->rxmode_pending_bitmask);
1227 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1231 if (is_allmulti_disable(rxf->rxmode_pending,
1232 rxf->rxmode_pending_bitmask) ||
1233 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1235 } else if (is_allmulti_enable(rxf->rxmode_pending,
1236 rxf->rxmode_pending_bitmask)) {
1238 allmulti_inactive(rxf->rxmode_pending,
1239 rxf->rxmode_pending_bitmask);
1240 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1242 allmulti_disable(rxf->rxmode_pending,
1243 rxf->rxmode_pending_bitmask);
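
The promisc/allmulti helpers above encode each mode in two fields: rxmode_pending holds the requested direction and rxmode_pending_bitmask marks that a change is queued at all; is_xxx_enable()/is_xxx_disable() test both, and xxx_inactive() consumes the queued change. A hypothetical sketch of that macro family (the real bna.h definitions are elided from the matches):

    #define is_mode_enable(pending, bitmask, m)                 \
        (((bitmask) & (m)) && ((pending) & (m)))
    #define is_mode_disable(pending, bitmask, m)                \
        (((bitmask) & (m)) && !((pending) & (m)))
    #define mode_enable(pending, bitmask, m)                    \
        do { (bitmask) |= (m); (pending) |= (m); } while (0)
    #define mode_disable(pending, bitmask, m)                   \
        do { (bitmask) |= (m); (pending) &= ~(m); } while (0)
    /* Change consumed (or cancelled): clear the queued mark. */
    #define mode_inactive(pending, bitmask, m)                  \
        do { (bitmask) &= ~(m); (pending) &= ~(m); } while (0)
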
1251 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1253 if (rxf->vlan_strip_pending) {
1254 rxf->vlan_strip_pending = false;
1255 bna_bfi_vlan_strip_enable(rxf);
1408 bna_rxf_start(&rx->rxf);
1422 bna_rxf_fail(&rx->rxf);
1428 bna_rxf_stop(&rx->rxf);
1487 bna_rxf_stop(&rx->rxf);
1493 bna_rxf_fail(&rx->rxf);
1514 bna_rxf_fail(&rx->rxf);
1707 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
2473 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2491 bna_rxf_uninit(&rx->rxf);
2572 struct bna_rxf *rxf = &rx->rxf;
2574 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2575 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2576 rxf->vlan_strip_pending = true;
2577 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2584 struct bna_rxf *rxf = &rx->rxf;
2586 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2587 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2588 rxf->vlan_strip_pending = true;
2589 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2597 struct bna_rxf *rxf = &rx->rxf;
2605 (rx->bna->promisc_rid != rxf->rx->rid))
2620 (rx->bna->default_mode_rid != rxf->rx->rid)) {
2632 if (bna_rxf_promisc_enable(rxf))
2635 if (bna_rxf_promisc_disable(rxf))
2640 if (bna_rxf_allmulti_enable(rxf))
2643 if (bna_rxf_allmulti_disable(rxf))
2650 rxf->cam_fltr_cbfn = NULL;
2651 rxf->cam_fltr_cbarg = rx->bna->bnad;
2652 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2664 struct bna_rxf *rxf = &rx->rxf;
2666 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2667 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2668 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2669 bfa_fsm_send_event(rxf, RXF_E_CONFIG);