Lines Matching refs:fltr
5293 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_insert_usr_fltr() argument
5295 u8 type = fltr->type, flags = fltr->flags; in bnxt_insert_usr_fltr()
5297 INIT_LIST_HEAD(&fltr->list); in bnxt_insert_usr_fltr()
5300 list_add_tail(&fltr->list, &bp->usr_fltr_list); in bnxt_insert_usr_fltr()
5303 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_del_one_usr_fltr() argument
5305 if (!list_empty(&fltr->list)) in bnxt_del_one_usr_fltr()
5306 list_del_init(&fltr->list); in bnxt_del_one_usr_fltr()
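
bnxt_insert_usr_fltr() and bnxt_del_one_usr_fltr() above maintain bp->usr_fltr_list with the kernel's circular list_head idiom: INIT_LIST_HEAD() makes the node point at itself, list_add_tail() links it onto the per-device list, and the delete helper only unlinks when the entry is actually on a list, re-initializing it so a second delete is harmless. Below is a minimal user-space sketch of that idiom, assuming a hand-rolled node type (usr_node) in place of struct list_head; it illustrates the list semantics and is not driver code.

    #include <stdbool.h>
    #include <stdio.h>

    struct usr_node {                      /* stand-in for struct list_head */
        struct usr_node *prev, *next;
    };

    static void node_init(struct usr_node *n)     { n->prev = n->next = n; }
    static bool node_unlinked(struct usr_node *n) { return n->next == n; }

    static void add_tail_sketch(struct usr_node *n, struct usr_node *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    /* mirrors bnxt_del_one_usr_fltr(): only unlink if the node is on a
     * list, then re-init so a later delete is a no-op */
    static void del_one_sketch(struct usr_node *n)
    {
        if (!node_unlinked(n)) {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            node_init(n);
        }
    }

    int main(void)
    {
        struct usr_node usr_list, a;       /* usr_list plays bp->usr_fltr_list */

        node_init(&usr_list);
        node_init(&a);                     /* like INIT_LIST_HEAD(&fltr->list) */
        add_tail_sketch(&a, &usr_list);
        del_one_sketch(&a);
        del_one_sketch(&a);                /* second delete is safe */
        printf("list empty again: %d\n", node_unlinked(&usr_list));
        return 0;
    }

The self-linked "empty" state is what lets bnxt_del_one_usr_fltr() be called unconditionally from several different teardown paths.
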
5320 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_del_fltr() argument
5322 hlist_del(&fltr->hash); in bnxt_del_fltr()
5323 bnxt_del_one_usr_fltr(bp, fltr); in bnxt_del_fltr()
5324 if (fltr->flags) { in bnxt_del_fltr()
5325 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); in bnxt_del_fltr()
5328 kfree(fltr); in bnxt_del_fltr()
5343 struct bnxt_ntuple_filter *fltr; in bnxt_free_ntp_fltrs() local
5346 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { in bnxt_free_ntp_fltrs()
5347 bnxt_del_l2_filter(bp, fltr->l2_fltr); in bnxt_free_ntp_fltrs()
5348 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || in bnxt_free_ntp_fltrs()
5349 !list_empty(&fltr->base.list))) in bnxt_free_ntp_fltrs()
5351 bnxt_del_fltr(bp, &fltr->base); in bnxt_free_ntp_fltrs()
5388 struct bnxt_l2_filter *fltr; in bnxt_free_l2_filters() local
5391 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { in bnxt_free_l2_filters()
5392 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || in bnxt_free_l2_filters()
5393 !list_empty(&fltr->base.list))) in bnxt_free_l2_filters()
5395 bnxt_del_fltr(bp, &fltr->base); in bnxt_free_l2_filters()
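
bnxt_free_ntp_fltrs() and bnxt_free_l2_filters() walk every hash bucket with the safe iterator and tear filters down through bnxt_del_fltr() (unhash, drop off the user list, release the sw_id bit when flags are set, kfree); the ntuple variant also drops the reference on the paired L2 filter as it walks. With !all, filters that are function-destined (BNXT_ACT_FUNC_DST) or still linked on the user list are kept. A compilable user-space sketch of that walk over one bucket, using a plain singly linked chain and a boolean in place of the list_empty() test (all names hypothetical):

    #include <stdbool.h>
    #include <stdlib.h>

    #define ACT_FUNC_DST 0x1               /* stand-in for BNXT_ACT_FUNC_DST */

    struct fake_fltr {
        struct fake_fltr *next;            /* hash-bucket chaining */
        unsigned int flags;
        bool on_usr_list;                  /* stand-in for !list_empty(&base.list) */
    };

    /* shaped like bnxt_free_ntp_fltrs()/bnxt_free_l2_filters(): walk one
     * bucket safely and free everything, except that with !all we keep
     * filters that are function-destined or still owned by the user list */
    static void free_bucket_sketch(struct fake_fltr **bucket, bool all)
    {
        struct fake_fltr *f = *bucket, *tmp;
        struct fake_fltr **link = bucket;

        while (f) {
            tmp = f->next;                 /* "safe" iteration: grab next first */
            if (!all && ((f->flags & ACT_FUNC_DST) || f->on_usr_list)) {
                link = &f->next;           /* skip: leave it on the chain */
            } else {
                *link = tmp;               /* unhash ... */
                free(f);                   /* ... and release */
            }
            f = tmp;
        }
    }

    int main(void)
    {
        struct fake_fltr *b;
        struct fake_fltr *keep = calloc(1, sizeof(*keep));
        struct fake_fltr *drop = calloc(1, sizeof(*drop));

        if (!keep || !drop)
            return 1;
        keep->flags = ACT_FUNC_DST;
        keep->next = drop;
        b = keep;
        free_bucket_sketch(&b, false);     /* frees "drop", keeps "keep" */
        free_bucket_sketch(&b, true);      /* now frees everything */
        return 0;
    }
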
5868 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) in bnxt_del_l2_filter() argument
5870 if (!atomic_dec_and_test(&fltr->refcnt)) in bnxt_del_l2_filter()
5873 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { in bnxt_del_l2_filter()
5877 hlist_del_rcu(&fltr->base.hash); in bnxt_del_l2_filter()
5878 bnxt_del_one_usr_fltr(bp, &fltr->base); in bnxt_del_l2_filter()
5879 if (fltr->base.flags) { in bnxt_del_l2_filter()
5880 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); in bnxt_del_l2_filter()
5884 kfree_rcu(fltr, base.rcu); in bnxt_del_l2_filter()
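
bnxt_del_l2_filter() is the reference-counted put: atomic_dec_and_test() lets only the last holder continue, and test_and_clear_bit(BNXT_FLTR_INSERTED, ...) guarantees the unhash/unlink/bitmap-release sequence runs exactly once before kfree_rcu() defers the actual free past an RCU grace period. A small user-space analog with C11 atomics; the RCU deferral and the sw_id bitmap are only represented by comments, and the struct is hypothetical:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct l2_sketch {
        atomic_int refcnt;
        atomic_bool inserted;              /* stand-in for the BNXT_FLTR_INSERTED bit */
        /* hash linkage, key, firmware id ... omitted */
    };

    static void del_l2_sketch(struct l2_sketch *f)
    {
        /* atomic_dec_and_test(): only whoever drops the last reference
         * continues past this point */
        if (atomic_fetch_sub(&f->refcnt, 1) != 1)
            return;

        /* test_and_clear_bit(INSERTED): unlink exactly once */
        if (!atomic_exchange(&f->inserted, false))
            return;

        /* here the real driver does hlist_del_rcu(), removes the filter
         * from the user list, releases its sw_id bit when flags are set,
         * and defers the free with kfree_rcu(); a plain free() stands in */
        free(f);
    }

    int main(void)
    {
        struct l2_sketch *f = calloc(1, sizeof(*f));

        if (!f)
            return 1;
        atomic_store(&f->refcnt, 2);       /* two holders, e.g. table + caller */
        atomic_store(&f->inserted, true);
        del_l2_sketch(f);                  /* first put: filter survives */
        del_l2_sketch(f);                  /* last put: unlink and free */
        return 0;
    }

Deferring the free past a grace period is what makes the lockless, RCU-protected lookup in __bnxt_lookup_l2_filter() (next group of lines) safe against concurrent deletes.
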
5892 struct bnxt_l2_filter *fltr; in __bnxt_lookup_l2_filter() local
5894 hlist_for_each_entry_rcu(fltr, head, base.hash) { in __bnxt_lookup_l2_filter()
5895 struct bnxt_l2_key *l2_key = &fltr->l2_key; in __bnxt_lookup_l2_filter()
5899 return fltr; in __bnxt_lookup_l2_filter()
5908 struct bnxt_l2_filter *fltr = NULL; in bnxt_lookup_l2_filter() local
5911 fltr = __bnxt_lookup_l2_filter(bp, key, idx); in bnxt_lookup_l2_filter()
5912 if (fltr) in bnxt_lookup_l2_filter()
5913 atomic_inc(&fltr->refcnt); in bnxt_lookup_l2_filter()
5915 return fltr; in bnxt_lookup_l2_filter()
5995 struct bnxt_l2_filter *fltr; in bnxt_lookup_l2_filter_from_key() local
6000 fltr = bnxt_lookup_l2_filter(bp, key, idx); in bnxt_lookup_l2_filter_from_key()
6001 return fltr; in bnxt_lookup_l2_filter_from_key()
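
__bnxt_lookup_l2_filter() walks one hash bucket under RCU and compares the stored l2_key (MAC plus VLAN); bnxt_lookup_l2_filter() and bnxt_lookup_l2_filter_from_key() wrap it and bump refcnt before returning, so every successful lookup hands the caller a reference it must later drop with bnxt_del_l2_filter(). A compilable sketch of that find-then-get split, with the RCU read section reduced to a comment and hypothetical struct/field names:

    #include <stdatomic.h>
    #include <string.h>

    struct key_sketch { unsigned char mac[6]; unsigned short vlan; };

    struct l2_sketch {
        struct l2_sketch *next;            /* one hash-bucket chain */
        struct key_sketch key;
        atomic_int refcnt;
    };

    /* __bnxt_lookup_l2_filter() analog: bare lookup, no reference taken */
    static struct l2_sketch *lookup_sketch(struct l2_sketch *bucket,
                                           const struct key_sketch *key)
    {
        /* the driver walks this under rcu_read_lock(); nothing equivalent here */
        for (struct l2_sketch *f = bucket; f; f = f->next)
            if (!memcmp(f->key.mac, key->mac, 6) && f->key.vlan == key->vlan)
                return f;
        return NULL;
    }

    /* bnxt_lookup_l2_filter() analog: lookup plus a reference the caller
     * must later drop (the driver pairs this with bnxt_del_l2_filter()) */
    static struct l2_sketch *lookup_get_sketch(struct l2_sketch *bucket,
                                               const struct key_sketch *key)
    {
        struct l2_sketch *f = lookup_sketch(bucket, key);

        if (f)
            atomic_fetch_add(&f->refcnt, 1);
        return f;
    }
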
6005 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, in bnxt_init_l2_filter() argument
6010 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); in bnxt_init_l2_filter()
6011 fltr->l2_key.vlan = key->vlan; in bnxt_init_l2_filter()
6012 fltr->base.type = BNXT_FLTR_TYPE_L2; in bnxt_init_l2_filter()
6013 if (fltr->base.flags) { in bnxt_init_l2_filter()
6020 fltr->base.sw_id = (u16)bit_id; in bnxt_init_l2_filter()
6024 hlist_add_head_rcu(&fltr->base.hash, head); in bnxt_init_l2_filter()
6025 bnxt_insert_usr_fltr(bp, &fltr->base); in bnxt_init_l2_filter()
6026 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); in bnxt_init_l2_filter()
6027 atomic_set(&fltr->refcnt, 1); in bnxt_init_l2_filter()
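
bnxt_init_l2_filter() fills in the key and type and, when the filter carries flags (for example a VF destination), reserves a software id from bp->ntp_fltr_bmap before publishing the filter: hash insert, user-list insert, the INSERTED state bit, and an initial refcount of 1 owned by the table. A sketch of the bitmap id reservation and release, using a plain find-first-zero-bit loop instead of the kernel bitmap helpers (bitmap size and names are made up):

    #include <limits.h>

    #define SKETCH_MAX_IDS 128
    #define BITS_PER_WORD  (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long id_bmap[SKETCH_MAX_IDS / BITS_PER_WORD];

    /* find-first-zero-bit plus set, the way a filter gets its base.sw_id */
    static int alloc_sw_id_sketch(void)
    {
        for (int id = 0; id < SKETCH_MAX_IDS; id++) {
            unsigned long *word = &id_bmap[id / BITS_PER_WORD];
            unsigned long mask = 1UL << (id % BITS_PER_WORD);

            if (!(*word & mask)) {
                *word |= mask;
                return id;                 /* becomes fltr->base.sw_id */
            }
        }
        return -1;                         /* table full: a failed bitmap alloc */
    }

    /* clear_bit() on teardown, as in bnxt_del_fltr()/bnxt_del_l2_filter() */
    static void free_sw_id_sketch(int id)
    {
        id_bmap[id / BITS_PER_WORD] &= ~(1UL << (id % BITS_PER_WORD));
    }

The refcount of 1 set at line 6027 is the table's own reference; it is the one bnxt_del_l2_filter() eventually drops when the filter is removed.
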
6035 struct bnxt_l2_filter *fltr; in bnxt_alloc_l2_filter() local
6041 fltr = bnxt_lookup_l2_filter(bp, key, idx); in bnxt_alloc_l2_filter()
6042 if (fltr) in bnxt_alloc_l2_filter()
6043 return fltr; in bnxt_alloc_l2_filter()
6045 fltr = kzalloc(sizeof(*fltr), gfp); in bnxt_alloc_l2_filter()
6046 if (!fltr) in bnxt_alloc_l2_filter()
6049 rc = bnxt_init_l2_filter(bp, fltr, key, idx); in bnxt_alloc_l2_filter()
6052 bnxt_del_l2_filter(bp, fltr); in bnxt_alloc_l2_filter()
6053 fltr = ERR_PTR(rc); in bnxt_alloc_l2_filter()
6055 return fltr; in bnxt_alloc_l2_filter()
6062 struct bnxt_l2_filter *fltr; in bnxt_alloc_new_l2_filter() local
6069 fltr = __bnxt_lookup_l2_filter(bp, key, idx); in bnxt_alloc_new_l2_filter()
6070 if (fltr) { in bnxt_alloc_new_l2_filter()
6071 fltr = ERR_PTR(-EEXIST); in bnxt_alloc_new_l2_filter()
6074 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); in bnxt_alloc_new_l2_filter()
6075 if (!fltr) { in bnxt_alloc_new_l2_filter()
6076 fltr = ERR_PTR(-ENOMEM); in bnxt_alloc_new_l2_filter()
6079 fltr->base.flags = flags; in bnxt_alloc_new_l2_filter()
6080 rc = bnxt_init_l2_filter(bp, fltr, key, idx); in bnxt_alloc_new_l2_filter()
6083 bnxt_del_l2_filter(bp, fltr); in bnxt_alloc_new_l2_filter()
6089 return fltr; in bnxt_alloc_new_l2_filter()
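
bnxt_alloc_l2_filter() is find-or-create: reuse an existing filter (the lookup already took the reference), otherwise kzalloc() and init, converting an init failure into an error pointer after dropping the half-built filter. bnxt_alloc_new_l2_filter() is the create-only variant used in atomic context, returning -EEXIST when the key is already present. Both rely on the kernel's ERR_PTR()/IS_ERR() convention; the runnable user-space sketch below spells that convention out with hypothetical helpers rather than <linux/err.h>:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* the kernel encodes small negative errnos in the pointer itself so a
     * single return value can carry either a filter or an error code */
    static inline void *err_ptr_sketch(long err)      { return (void *)err; }
    static inline long  ptr_err_sketch(const void *p) { return (long)p; }
    static inline int   is_err_sketch(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-4095;
    }

    struct l2_sketch { int dummy; };

    /* shaped like bnxt_alloc_l2_filter(): reuse, else allocate and init */
    static struct l2_sketch *alloc_sketch(struct l2_sketch *existing, int init_rc)
    {
        struct l2_sketch *f;

        if (existing)
            return existing;               /* lookup already took a reference */
        f = calloc(1, sizeof(*f));
        if (!f)
            return err_ptr_sketch(-ENOMEM);
        if (init_rc) {                     /* the init step failed */
            free(f);                       /* the driver uses bnxt_del_l2_filter() */
            return err_ptr_sketch(init_rc);
        }
        return f;
    }

    int main(void)
    {
        struct l2_sketch *f = alloc_sketch(NULL, -ENOMEM);

        if (is_err_sketch(f))
            printf("alloc failed: %ld\n", ptr_err_sketch(f));
        f = alloc_sketch(NULL, 0);
        if (!is_err_sketch(f)) {
            printf("got a filter\n");
            free(f);
        }
        return 0;
    }
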
6103 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) in bnxt_hwrm_l2_filter_free() argument
6109 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { in bnxt_hwrm_l2_filter_free()
6112 if (fltr->base.vf_idx >= pf->active_vfs) in bnxt_hwrm_l2_filter_free()
6115 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); in bnxt_hwrm_l2_filter_free()
6125 req->l2_filter_id = fltr->base.filter_id; in bnxt_hwrm_l2_filter_free()
6129 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) in bnxt_hwrm_l2_filter_alloc() argument
6136 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { in bnxt_hwrm_l2_filter_alloc()
6139 if (fltr->base.vf_idx >= pf->active_vfs) in bnxt_hwrm_l2_filter_alloc()
6142 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); in bnxt_hwrm_l2_filter_alloc()
6154 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); in bnxt_hwrm_l2_filter_alloc()
6159 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); in bnxt_hwrm_l2_filter_alloc()
6162 if (fltr->l2_key.vlan) { in bnxt_hwrm_l2_filter_alloc()
6168 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); in bnxt_hwrm_l2_filter_alloc()
6175 fltr->base.filter_id = resp->l2_filter_id; in bnxt_hwrm_l2_filter_alloc()
6176 set_bit(BNXT_FLTR_VALID, &fltr->base.state); in bnxt_hwrm_l2_filter_alloc()
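
bnxt_hwrm_l2_filter_free() and bnxt_hwrm_l2_filter_alloc() turn the software filter into firmware (HWRM) messages: the free echoes back the stored fltr->base.filter_id, the alloc carries the destination VNIC (fw_vnic_id), the MAC and an optional VLAN, and on success the l2_filter_id from the response is saved and BNXT_FLTR_VALID is set. Filters aimed at a VF (BNXT_ACT_FUNC_DST) are issued with that VF's target_id after a bounds check against pf->active_vfs. The compilable sketch below only shows the bookkeeping around a hypothetical send callback; the fake_* request/response structs are invented and not the real hwrm_cfa_l2_filter_* layouts:

    #include <stdint.h>
    #include <string.h>

    /* hypothetical, heavily trimmed stand-ins for the HWRM messages */
    struct fake_l2_alloc_req  { uint16_t dst_id; uint8_t l2_addr[6]; uint16_t l2_ivlan; };
    struct fake_l2_alloc_resp { uint64_t l2_filter_id; };

    struct fake_l2_fltr {
        uint8_t  mac[6];
        uint16_t vlan;
        uint16_t fw_vnic_id;
        uint64_t filter_id;                /* firmware handle, resp->l2_filter_id */
        int      valid;                    /* stand-in for BNXT_FLTR_VALID */
    };

    /* shaped like bnxt_hwrm_l2_filter_alloc(): build the request from the
     * software filter, "send" it, then record the firmware handle */
    static int l2_filter_alloc_sketch(struct fake_l2_fltr *f,
                                      int (*send)(const struct fake_l2_alloc_req *,
                                                  struct fake_l2_alloc_resp *))
    {
        struct fake_l2_alloc_req req = { .dst_id = f->fw_vnic_id };
        struct fake_l2_alloc_resp resp = { 0 };
        int rc;

        memcpy(req.l2_addr, f->mac, sizeof(req.l2_addr));
        if (f->vlan)
            req.l2_ivlan = f->vlan;        /* the driver also sets enable bits */
        rc = send(&req, &resp);            /* hwrm_req_send() stand-in */
        if (!rc) {
            f->filter_id = resp.l2_filter_id;
            f->valid = 1;
        }
        return rc;
    }
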
6183 struct bnxt_ntuple_filter *fltr) in bnxt_hwrm_cfa_ntuple_filter_free() argument
6188 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); in bnxt_hwrm_cfa_ntuple_filter_free()
6193 req->ntuple_filter_id = fltr->base.filter_id; in bnxt_hwrm_cfa_ntuple_filter_free()
6226 struct bnxt_ntuple_filter *fltr) in bnxt_cfg_rfs_ring_tbl_idx() argument
6228 u16 rxq = fltr->base.rxq; in bnxt_cfg_rfs_ring_tbl_idx()
6230 if (fltr->base.flags & BNXT_ACT_RSS_CTX) { in bnxt_cfg_rfs_ring_tbl_idx()
6236 fltr->base.fw_vnic_id); in bnxt_cfg_rfs_ring_tbl_idx()
6264 struct bnxt_ntuple_filter *fltr) in bnxt_hwrm_cfa_ntuple_filter_alloc() argument
6268 struct bnxt_flow_masks *masks = &fltr->fmasks; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6269 struct flow_keys *keys = &fltr->fkeys; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6278 l2_fltr = fltr->l2_fltr; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6281 if (fltr->base.flags & BNXT_ACT_DROP) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
6285 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); in bnxt_hwrm_cfa_ntuple_filter_alloc()
6287 vnic = &bp->vnic_info[fltr->base.rxq + 1]; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6324 fltr->base.filter_id = resp->ntuple_filter_id; in bnxt_hwrm_cfa_ntuple_filter_alloc()
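
The ntuple path mirrors this: bnxt_hwrm_cfa_ntuple_filter_free() echoes the stored ntuple_filter_id (after marking BNXT_FLTR_FW_DELETED), while the alloc side starts from the paired L2 filter (fltr->l2_fltr) and the parsed flow keys/masks and then picks a destination: BNXT_ACT_DROP wins outright, the RSS ring-table path (including BNXT_ACT_RSS_CTX contexts) goes through bnxt_cfg_rfs_ring_tbl_idx(), and the fallback targets the per-ring VNIC at rxq + 1. A small decision sketch of that branch order; the enum, struct, and the capability test that selects the ring-table path are assumptions:

    #include <stdint.h>

    enum dest_kind { DEST_DROP, DEST_RING_TBL, DEST_VNIC };

    struct ntuple_dest_sketch {
        enum dest_kind kind;
        uint16_t index;                    /* ring-table index or VNIC id */
    };

    /* mirrors the branch structure visible in bnxt_hwrm_cfa_ntuple_filter_alloc():
     * drop beats everything, then the ring-table path, then the legacy
     * "one VNIC per RX ring, offset by one" fallback */
    static struct ntuple_dest_sketch pick_dest_sketch(int act_drop, int has_ring_tbl,
                                                      uint16_t rxq)
    {
        struct ntuple_dest_sketch d;

        if (act_drop) {
            d.kind = DEST_DROP;
            d.index = 0;
        } else if (has_ring_tbl) {
            d.kind = DEST_RING_TBL;        /* bnxt_cfg_rfs_ring_tbl_idx() territory */
            d.index = rxq;
        } else {
            d.kind = DEST_VNIC;
            d.index = (uint16_t)(rxq + 1); /* vnic_info[fltr->base.rxq + 1] */
        }
        return d;
    }
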
6332 struct bnxt_l2_filter *fltr; in bnxt_hwrm_set_vnic_filter() local
6338 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); in bnxt_hwrm_set_vnic_filter()
6339 if (IS_ERR(fltr)) in bnxt_hwrm_set_vnic_filter()
6340 return PTR_ERR(fltr); in bnxt_hwrm_set_vnic_filter()
6342 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; in bnxt_hwrm_set_vnic_filter()
6343 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); in bnxt_hwrm_set_vnic_filter()
6345 bnxt_del_l2_filter(bp, fltr); in bnxt_hwrm_set_vnic_filter()
6347 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; in bnxt_hwrm_set_vnic_filter()
6360 struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; in bnxt_hwrm_clear_vnic_filter() local
6362 bnxt_hwrm_l2_filter_free(bp, fltr); in bnxt_hwrm_clear_vnic_filter()
6363 bnxt_del_l2_filter(bp, fltr); in bnxt_hwrm_clear_vnic_filter()
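
bnxt_hwrm_set_vnic_filter() strings the pieces together for unicast addresses: allocate (or reuse) an L2 filter for the MAC, point it at the VNIC's fw_vnic_id, program it with bnxt_hwrm_l2_filter_alloc(), and only on success store it in vnic->l2_filters[idx]; on failure the software reference is dropped again. bnxt_hwrm_clear_vnic_filter() undoes both halves, firmware free first, then the final put. A compilable sketch of that pairing, with trivial placeholder stubs standing in for the driver calls:

    #include <stddef.h>
    #include <stdlib.h>

    struct vnic_sketch { void *l2_filters[4]; };

    /* placeholder stubs for the real bnxt_alloc_l2_filter(),
     * bnxt_hwrm_l2_filter_alloc()/_free() and bnxt_del_l2_filter() */
    static void *alloc_l2(const unsigned char *mac)     { (void)mac; return malloc(1); }
    static int   fw_alloc(void *f, unsigned short vnic) { (void)f; (void)vnic; return 0; }
    static int   fw_free(void *f)                       { (void)f; return 0; }
    static void  put_l2(void *f)                        { free(f); }

    /* bnxt_hwrm_set_vnic_filter() shape: software object first, firmware
     * second, and only a fully programmed filter lands in vnic->l2_filters[] */
    static int set_vnic_filter_sketch(struct vnic_sketch *vnic,
                                      unsigned short fw_vnic_id,
                                      const unsigned char *mac, int idx)
    {
        void *fltr = alloc_l2(mac);
        int rc;

        if (!fltr)
            return -1;
        rc = fw_alloc(fltr, fw_vnic_id);
        if (rc) {
            put_l2(fltr);                  /* roll back the software reference */
            return rc;
        }
        vnic->l2_filters[idx] = fltr;
        return 0;
    }

    /* bnxt_hwrm_clear_vnic_filter() shape: firmware free, then the put */
    static void clear_vnic_filter_sketch(struct vnic_sketch *vnic, int idx)
    {
        void *fltr = vnic->l2_filters[idx];

        if (fltr) {
            fw_free(fltr);
            put_l2(fltr);
            vnic->l2_filters[idx] = NULL;
        }
    }
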
12778 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_cfg_one_usr_fltr() argument
12783 if (list_empty(&fltr->list)) in bnxt_cfg_one_usr_fltr()
12786 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { in bnxt_cfg_one_usr_fltr()
12787 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); in bnxt_cfg_one_usr_fltr()
12794 fltr->sw_id); in bnxt_cfg_one_usr_fltr()
12796 } else if (fltr->type == BNXT_FLTR_TYPE_L2) { in bnxt_cfg_one_usr_fltr()
12797 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); in bnxt_cfg_one_usr_fltr()
12801 fltr->sw_id); in bnxt_cfg_one_usr_fltr()
13488 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; in bnxt_cfg_rx_mode() local
13490 bnxt_hwrm_l2_filter_free(bp, fltr); in bnxt_cfg_rx_mode()
13491 bnxt_del_l2_filter(bp, fltr); in bnxt_cfg_rx_mode()
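
bnxt_cfg_one_usr_fltr() re-installs user-created filters (the bnxt_cfg_rx_mode() lines above show the matching teardown of the VNIC's unicast filters): entries still linked on usr_fltr_list are dispatched on fltr->type, and container_of() recovers the enclosing bnxt_ntuple_filter or bnxt_l2_filter from the embedded bnxt_filter_base before the corresponding HWRM alloc is retried; the fltr->sw_id fragments above are the tail of the error reporting for that retry. A runnable illustration of the container_of() step, built from offsetof() the way the kernel macro is (struct names hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    struct base_sketch { int type; int sw_id; };      /* bnxt_filter_base analog */
    struct ntuple_sketch { long flow_id; struct base_sketch base; };

    /* same trick as the kernel's container_of(): subtract the member offset */
    #define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct ntuple_sketch n = { .flow_id = 42, .base = { .type = 1, .sw_id = 7 } };
        struct base_sketch *b = &n.base;              /* what the list hands out */
        struct ntuple_sketch *back =
            container_of_sketch(b, struct ntuple_sketch, base);

        printf("flow_id %ld, sw_id %d\n", back->flow_id, back->base.sw_id);
        return 0;
    }
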
15388 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, in bnxt_insert_ntp_filter() argument
15401 fltr->base.sw_id = (u16)bit_id; in bnxt_insert_ntp_filter()
15402 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; in bnxt_insert_ntp_filter()
15403 fltr->base.flags |= BNXT_ACT_RING_DST; in bnxt_insert_ntp_filter()
15405 hlist_add_head_rcu(&fltr->base.hash, head); in bnxt_insert_ntp_filter()
15406 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); in bnxt_insert_ntp_filter()
15407 bnxt_insert_usr_fltr(bp, &fltr->base); in bnxt_insert_ntp_filter()
15453 struct bnxt_ntuple_filter *fltr, u32 idx) in bnxt_lookup_ntp_filter_from_idx() argument
15460 if (bnxt_fltr_match(f, fltr)) in bnxt_lookup_ntp_filter_from_idx()
15471 struct bnxt_ntuple_filter *fltr, *new_fltr; in bnxt_rx_flow_steer() local
15531 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); in bnxt_rx_flow_steer()
15532 if (fltr) { in bnxt_rx_flow_steer()
15533 rc = fltr->base.sw_id; in bnxt_rx_flow_steer()
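
bnxt_rx_flow_steer() is the aRFS hook: the received packet is parsed into a candidate ntuple filter, hashed to a bucket index, and bnxt_lookup_ntp_filter_from_idx() checks via bnxt_fltr_match() whether an equivalent filter already exists; if so its sw_id is returned as the flow id, otherwise bnxt_insert_ntp_filter() reserves a sw_id bit, tags the filter BNXT_ACT_RING_DST, hashes it, sets INSERTED, and links it onto the user list. A compilable sketch of that reuse-or-insert decision with a toy 4-tuple match (all names hypothetical):

    #include <stdlib.h>

    struct flow_sketch { unsigned int saddr, daddr; unsigned short sport, dport; };

    struct ntp_sketch {
        struct ntp_sketch *next;           /* hash-bucket chain */
        struct flow_sketch flow;
        int sw_id;
    };

    /* stands in for bnxt_fltr_match(): same 4-tuple means same filter */
    static int match_sketch(const struct ntp_sketch *f, const struct flow_sketch *k)
    {
        return f->flow.saddr == k->saddr && f->flow.daddr == k->daddr &&
               f->flow.sport == k->sport && f->flow.dport == k->dport;
    }

    /* bnxt_rx_flow_steer() shape: reuse an existing filter's sw_id, or
     * insert the new one and hand back its freshly reserved id */
    static int steer_sketch(struct ntp_sketch **bucket, const struct flow_sketch *k,
                            int new_sw_id)
    {
        struct ntp_sketch *f;

        for (f = *bucket; f; f = f->next)
            if (match_sketch(f, k))
                return f->sw_id;           /* found: report the existing flow id */

        f = calloc(1, sizeof(*f));
        if (!f)
            return -1;
        f->flow = *k;
        f->sw_id = new_sw_id;              /* the driver takes this from the bitmap */
        f->next = *bucket;                 /* hlist_add_head_rcu() analog */
        *bucket = f;
        return f->sw_id;
    }

bnxt_del_ntp_filter() below then mirrors the L2 delete shown earlier: the INSERTED bit guards the unhash, the paired L2 filter reference is dropped, the sw_id bit is cleared, and the memory goes to kfree_rcu().
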
15554 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) in bnxt_del_ntp_filter() argument
15557 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { in bnxt_del_ntp_filter()
15561 hlist_del_rcu(&fltr->base.hash); in bnxt_del_ntp_filter()
15562 bnxt_del_one_usr_fltr(bp, &fltr->base); in bnxt_del_ntp_filter()
15565 bnxt_del_l2_filter(bp, fltr->l2_fltr); in bnxt_del_ntp_filter()
15566 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); in bnxt_del_ntp_filter()
15567 kfree_rcu(fltr, base.rcu); in bnxt_del_ntp_filter()
15578 struct bnxt_ntuple_filter *fltr; in bnxt_cfg_ntp_filters() local
15582 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { in bnxt_cfg_ntp_filters()
15585 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { in bnxt_cfg_ntp_filters()
15586 if (fltr->base.flags & BNXT_ACT_NO_AGING) in bnxt_cfg_ntp_filters()
15588 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, in bnxt_cfg_ntp_filters()
15589 fltr->flow_id, in bnxt_cfg_ntp_filters()
15590 fltr->base.sw_id)) { in bnxt_cfg_ntp_filters()
15592 fltr); in bnxt_cfg_ntp_filters()
15597 fltr); in bnxt_cfg_ntp_filters()
15601 set_bit(BNXT_FLTR_VALID, &fltr->base.state); in bnxt_cfg_ntp_filters()
15605 bnxt_del_ntp_filter(bp, fltr); in bnxt_cfg_ntp_filters()
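
bnxt_cfg_ntp_filters() is the periodic worker that completes the aRFS life cycle: filters already programmed (BNXT_FLTR_VALID) and not pinned by BNXT_ACT_NO_AGING are freed in firmware and deleted once rps_may_expire_flow() reports the stack has stopped using (rxq, flow_id, sw_id); filters not yet valid are pushed to firmware, gaining BNXT_FLTR_VALID on success and being deleted on failure. A control-flow sketch of one bucket pass, with the firmware and RPS calls reduced to function-pointer parameters; entries are only marked here, where the driver's safe iterator deletes in place:

    #include <stdbool.h>

    struct age_sketch {
        struct age_sketch *next;
        bool valid;                        /* BNXT_FLTR_VALID      */
        bool no_aging;                     /* BNXT_ACT_NO_AGING    */
        bool dead;                         /* marked for deletion  */
    };

    /* shaped like the loop in bnxt_cfg_ntp_filters(): age out stale valid
     * filters, program the rest, and mark anything that has to go */
    static void age_bucket_sketch(struct age_sketch *bucket,
                                  bool (*expired)(struct age_sketch *),
                                  int (*fw_alloc)(struct age_sketch *),
                                  int (*fw_free)(struct age_sketch *))
    {
        for (struct age_sketch *f = bucket; f; f = f->next) {
            if (f->valid) {
                if (f->no_aging)
                    continue;              /* user-pinned: never aged */
                if (expired(f)) {          /* rps_may_expire_flow() said yes */
                    fw_free(f);
                    f->dead = true;        /* bnxt_del_ntp_filter() in the driver */
                }
            } else {
                if (fw_alloc(f))
                    f->dead = true;        /* programming failed: drop it */
                else
                    f->valid = true;       /* set_bit(BNXT_FLTR_VALID, ...) */
            }
        }
        /* a follow-up pass would now unhash and free every entry marked
         * dead; the real loop does this inline under the safe iterator */
    }
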