Lines Matching full:blk

78 * @blk: block type
84 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
86 return ice_sect_lkup[blk][sect];
623 * @blk: hardware block
630 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
635 if (prof >= hw->blk[blk].es.count)
638 if (fv_idx >= hw->blk[blk].es.fvw)
641 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
654 * @blk: HW block
663 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
668 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
675 * @blk: HW block
681 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
683 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
689 * @blk: HW block
697 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
705 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
709 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
713 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
714 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
716 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
725 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
726 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
734 * @blk: HW block
744 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
752 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
755 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
765 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
772 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
773 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
774 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
775 &hw->blk[blk].xlt1.ptypes[ptype];
777 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
778 hw->blk[blk].xlt1.t[ptype] = ptg;
880 * @blk: HW block
888 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
897 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
905 * @blk: HW block
910 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
914 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
915 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
916 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
925 * @blk: HW block
930 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
935 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
936 return ice_vsig_alloc_val(hw, blk, i);
944 * @blk: HW block
957 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
960 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
976 * @blk: HW block
982 static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
992 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
995 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
997 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1013 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
1018 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
1027 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
1035 * @blk: HW block
1043 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1053 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1060 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1064 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
1091 * @blk: HW block
1101 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1115 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
1119 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
1129 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
1138 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
1139 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
1142 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1143 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
1144 &hw->blk[blk].xlt2.vsis[vsi];
1145 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
1146 hw->blk[blk].xlt2.t[vsi] = vsig;
1154 * @blk: HW block
1160 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
1173 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
1174 hw->blk[blk].masks.count; i++)
1175 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
1176 if (hw->blk[blk].masks.masks[i].in_use &&
1177 hw->blk[blk].masks.masks[i].idx == idx) {
1179 if (hw->blk[blk].masks.masks[i].mask == mask)
1198 * @blk: HW block
1203 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
1208 for (i = 0; i < hw->blk[blk].es.fvw; i++)
1209 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
1218 * @blk: HW block
1225 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
1229 struct ice_es *es = &hw->blk[blk].es;
1235 if (blk == ICE_BLK_FD)
1241 if (blk == ICE_BLK_RSS && es->symm[i] != symm)
1248 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
1260 * @blk: the block type
1263 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1265 switch (blk) {
1280 * @blk: the block type
1283 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1285 switch (blk) {
1301 * @blk: the block to allocate the TCAM for
1309 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
1314 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1323 * @blk: the block from which to free the TCAM entry
1329 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
1333 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1342 * @blk: the block to allocate the profile ID for
1348 static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
1354 if (!ice_prof_id_rsrc_type(blk, &res_type))
1367 * @blk: the block from which to free the profile ID
1372 static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1377 if (!ice_prof_id_rsrc_type(blk, &res_type))
1386 * @blk: the block for which to increment the profile ID reference count
1389 static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1391 if (prof_id > hw->blk[blk].es.count)
1394 hw->blk[blk].es.ref_count[prof_id]++;
1402 * @blk: hardware block
1408 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
1414 switch (blk) {
1427 blk);
1432 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
1433 blk, idx, offset, val);
1439 * @blk: hardware block
1444 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
1449 switch (blk) {
1458 blk);
1463 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
1464 blk, prof_id, offset, enable_mask);
1470 * @blk: hardware block
1472 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
1477 mutex_init(&hw->blk[blk].masks.lock);
1481 hw->blk[blk].masks.count = per_pf;
1482 hw->blk[blk].masks.first = hw->pf_id * per_pf;
1484 memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
1486 for (i = hw->blk[blk].masks.first;
1487 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
1488 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
1504 * @blk: hardware block
1510 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
1518 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1521 mutex_lock(&hw->blk[blk].masks.lock);
1523 for (i = hw->blk[blk].masks.first;
1524 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
1525 if (hw->blk[blk].masks.masks[i].in_use) {
1529 if (hw->blk[blk].masks.masks[i].mask == mask &&
1530 hw->blk[blk].masks.masks[i].idx == idx) {
1554 hw->blk[blk].masks.masks[i].in_use = true;
1555 hw->blk[blk].masks.masks[i].mask = mask;
1556 hw->blk[blk].masks.masks[i].idx = idx;
1557 hw->blk[blk].masks.masks[i].ref = 0;
1558 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
1561 hw->blk[blk].masks.masks[i].ref++;
1566 mutex_unlock(&hw->blk[blk].masks.lock);
1574 * @blk: hardware block
1578 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
1580 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1583 if (!(mask_idx >= hw->blk[blk].masks.first &&
1584 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
1587 mutex_lock(&hw->blk[blk].masks.lock);
1589 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
1592 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
1593 hw->blk[blk].masks.masks[mask_idx].ref--;
1598 hw->blk[blk].masks.masks[mask_idx].in_use = false;
1599 hw->blk[blk].masks.masks[mask_idx].mask = 0;
1600 hw->blk[blk].masks.masks[mask_idx].idx = 0;
1603 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
1605 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
1608 mutex_unlock(&hw->blk[blk].masks.lock);
1616 * @blk: hardware block
1620 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
1625 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1628 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
1631 ice_free_prof_mask(hw, blk, i);
1639 * @blk: hardware block
1643 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
1647 mutex_lock(&hw->blk[blk].masks.lock);
1649 for (i = hw->blk[blk].masks.first;
1650 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
1651 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
1653 hw->blk[blk].masks.masks[i].in_use = false;
1654 hw->blk[blk].masks.masks[i].idx = 0;
1655 hw->blk[blk].masks.masks[i].mask = 0;
1658 mutex_unlock(&hw->blk[blk].masks.lock);
1659 mutex_destroy(&hw->blk[blk].masks.lock);
1677 * @blk: hardware block
1682 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
1691 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1694 for (i = 0; i < hw->blk[blk].es.fvw; i++)
1696 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
1709 ice_free_prof_mask(hw, blk, i);
1715 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
1718 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
1726 * @blk: the block in which to write the extraction sequence
1732 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
1737 off = prof_id * hw->blk[blk].es.fvw;
1739 memset(&hw->blk[blk].es.t[off], 0,
1740 hw->blk[blk].es.fvw * sizeof(*fv));
1741 hw->blk[blk].es.written[prof_id] = false;
1743 memcpy(&hw->blk[blk].es.t[off], fv,
1744 hw->blk[blk].es.fvw * sizeof(*fv));
1747 if (blk == ICE_BLK_RSS)
1748 hw->blk[blk].es.symm[prof_id] = symm;
1754 * @blk: the block from which to free the profile ID
1758 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1760 if (prof_id > hw->blk[blk].es.count)
1763 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
1764 if (!--hw->blk[blk].es.ref_count[prof_id]) {
1765 ice_write_es(hw, blk, prof_id, NULL, false);
1766 ice_free_prof_masks(hw, blk, prof_id);
1767 return ice_free_prof_id(hw, blk, prof_id);
1820 * @blk: the HW block to initialize
1822 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
1826 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
1829 ptg = hw->blk[blk].xlt1.t[pt];
1831 ice_ptg_alloc_val(hw, blk, ptg);
1832 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1840 * @blk: the HW block to initialize
1842 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
1846 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
1849 vsig = hw->blk[blk].xlt2.t[vsi];
1851 ice_vsig_alloc_val(hw, blk, vsig);
1852 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
1856 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1922 sizeof(*hw->blk[block_id].xlt1.t);
1923 dst = hw->blk[block_id].xlt1.t;
1924 dst_len = hw->blk[block_id].xlt1.count *
1925 sizeof(*hw->blk[block_id].xlt1.t);
1935 sizeof(*hw->blk[block_id].xlt2.t);
1936 dst = (u8 *)hw->blk[block_id].xlt2.t;
1937 dst_len = hw->blk[block_id].xlt2.count *
1938 sizeof(*hw->blk[block_id].xlt2.t);
1948 sizeof(*hw->blk[block_id].prof.t);
1949 dst = (u8 *)hw->blk[block_id].prof.t;
1950 dst_len = hw->blk[block_id].prof.count *
1951 sizeof(*hw->blk[block_id].prof.t);
1961 sizeof(*hw->blk[block_id].prof_redir.t);
1962 dst = hw->blk[block_id].prof_redir.t;
1963 dst_len = hw->blk[block_id].prof_redir.count *
1964 sizeof(*hw->blk[block_id].prof_redir.t);
1974 hw->blk[block_id].es.fvw) *
1975 sizeof(*hw->blk[block_id].es.t);
1976 dst = (u8 *)hw->blk[block_id].es.t;
1977 dst_len = (u32)(hw->blk[block_id].es.count *
1978 hw->blk[block_id].es.fvw) *
1979 sizeof(*hw->blk[block_id].es.t);
2020 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
2021 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
2022 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
2023 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
2024 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2037 struct ice_es *es = &hw->blk[blk_idx].es;
2082 * @blk: the HW block on which to free the VSIG table entries
2084 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2088 if (!hw->blk[blk].xlt2.vsig_tbl)
2092 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2093 ice_vsig_free(hw, blk, i);
2106 if (hw->blk[i].is_list_init) {
2107 struct ice_es *es = &hw->blk[i].es;
2115 hw->blk[i].is_list_init = false;
2118 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
2119 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
2120 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
2121 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
2122 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
2123 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
2124 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
2125 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
2126 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
2127 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
2128 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
2129 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
2130 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
2131 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
2140 memset(hw->blk, 0, sizeof(hw->blk));
2163 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2164 struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
2165 struct ice_prof_tcam *prof = &hw->blk[i].prof;
2166 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2167 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2168 struct ice_es *es = &hw->blk[i].es;
2170 if (hw->blk[i].is_list_init) {
2213 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2214 struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
2215 struct ice_prof_tcam *prof = &hw->blk[i].prof;
2216 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2217 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2218 struct ice_es *es = &hw->blk[i].es;
2221 if (hw->blk[i].is_list_init)
2227 hw->blk[i].is_list_init = true;
2229 hw->blk[i].overwrite = blk_sizes[i].overwrite;
2342 * @blk: the block in which to write the profile ID
2353 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
2364 switch (hw->blk[blk].prof.cdid_bits) {
2397 * @blk: the block in which to write the profile ID
2409 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
2418 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
2419 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
2421 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
2422 hw->blk[blk].prof.t[idx].prof_id = prof_id;
2431 * @blk: HW block
2436 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
2443 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2446 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2458 * @blk: HW block
2463 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
2468 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2481 * @blk: hardware block
2486 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
2489 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
2494 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
2498 id = ice_sect_id(blk, ICE_VEC_TBL);
2510 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
2519 * @blk: hardware block
2524 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
2534 id = ice_sect_id(blk, ICE_PROF_TCAM);
2546 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
2547 sizeof(hw->blk[blk].prof.t->key));
2555 * @blk: hardware block
2560 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
2570 id = ice_sect_id(blk, ICE_XLT1);
2587 * @blk: hardware block
2592 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
2605 id = ice_sect_id(blk, ICE_XLT2);
2627 * @blk: hardware block
2631 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
2681 status = ice_prof_bld_es(hw, blk, b, chgs);
2687 status = ice_prof_bld_tcam(hw, blk, b, chgs);
2693 status = ice_prof_bld_xlt1(blk, b, chgs);
2699 status = ice_prof_bld_xlt2(blk, b, chgs);
2803 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
2859 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
2896 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
2995 fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
3030 * @blk: hardware block
3046 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
3059 mutex_lock(&hw->blk[blk].es.prof_map_lock);
3062 status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
3065 status = ice_alloc_prof_id(hw, blk, &prof_id);
3068 if (blk == ICE_BLK_FD && fd_swap) {
3079 } else if (blk == ICE_BLK_FD) {
3082 status = ice_update_prof_masking(hw, blk, prof_id, masks);
3087 ice_write_es(hw, blk, prof_id, es, symm);
3090 ice_prof_inc_ref(hw, blk, prof_id);
3111 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
3136 list_add(&prof->list, &hw->blk[blk].es.prof_map);
3140 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3147 * @blk: hardware block
3154 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
3159 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
3171 * @blk: hardware block
3175 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3180 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3190 * @blk: hardware block
3193 static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
3202 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
3208 status = ice_free_tcam_ent(hw, blk, idx);
3216 * @blk: hardware block
3220 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
3229 status = ice_rel_tcam_idx(hw, blk,
3241 * @blk: hardware block
3246 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3255 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3259 status = ice_rem_prof_id(hw, blk, d);
3268 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3285 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
3292 return ice_vsig_free(hw, blk, vsig);
3298 * @blk: hardware block
3304 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3311 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3316 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
3318 return ice_rem_vsig(hw, blk, vsig, chg);
3320 status = ice_rem_prof_id(hw, blk, p);
3334 * @blk: hardware block
3337 static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
3347 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
3348 if (ice_has_prof_vsig(hw, blk, i, id)) {
3349 status = ice_rem_prof_id_vsig(hw, blk, i, id,
3356 status = ice_upd_prof_hw(hw, blk, &chg);
3370 * @blk: hardware block
3377 int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
3382 mutex_lock(&hw->blk[blk].es.prof_map_lock);
3384 pmap = ice_search_prof_id(hw, blk, id);
3391 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
3396 ice_prof_dec_ref(hw, blk, pmap->prof_id);
3402 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3409 * @blk: hardware block
3414 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
3422 mutex_lock(&hw->blk[blk].es.prof_map_lock);
3424 map = ice_search_prof_id(hw, blk, hdl);
3431 if (!hw->blk[blk].es.written[map->prof_id]) {
3448 hw->blk[blk].es.written[map->prof_id] = true;
3454 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3462 * @blk: hardware block
3469 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3475 list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3502 * @blk: hardware block
3507 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
3515 mutex_lock(&hw->blk[blk].es.prof_map_lock);
3516 map = ice_search_prof_id(hw, blk, hdl);
3541 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3548 * @blk: hardware block
3554 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
3565 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3567 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3605 * @blk: hardware block
3614 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
3627 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
3644 status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
3654 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
3682 * @blk: hardware block
3687 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3707 list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3721 status = ice_prof_tcam_ena_dis(hw, blk, false,
3732 status = ice_prof_tcam_ena_dis(hw, blk, true,
3751 * @blk: hardware block
3758 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3772 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
3780 mutex_lock(&hw->blk[blk].es.prof_map_lock);
3782 map = ice_search_prof_id(hw, blk, hdl);
3808 status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
3829 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
3846 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3849 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3851 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3855 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3864 * @blk: hardware block
3870 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
3881 new_vsig = ice_vsig_alloc(hw, blk);
3887 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
3891 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
3913 * @blk: hardware block
3920 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
3928 vsig = ice_vsig_alloc(hw, blk);
3932 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
3938 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
3952 * @blk: hardware block
3957 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
3972 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
3983 * @blk: hardware block
3992 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4005 status = ice_get_prof(hw, blk, hdl, &chg);
4010 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4023 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
4029 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4037 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
4041 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
4046 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
4049 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4057 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
4067 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
4073 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4078 status = ice_create_vsig_from_lst(hw, blk, vsi,
4085 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4092 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
4095 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4101 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
4110 status = ice_upd_prof_hw(hw, blk, &chg);
4129 * @blk: HW block
4140 ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
4146 if (blk != ICE_BLK_FD)
4150 status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
4158 status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
4169 ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
4198 * @blk: hardware block
4207 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4219 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4226 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
4227 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4243 status = ice_rem_vsig(hw, blk, vsig, &chg);
4247 status = ice_rem_prof_id_vsig(hw, blk, vsig,
4253 status = ice_adj_prof_priorities(hw, blk, vsig,
4261 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
4271 status = ice_move_vsi(hw, blk, vsi,
4276 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
4285 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4293 status = ice_create_vsig_from_lst(hw, blk, vsi,
4300 status = ice_adj_prof_priorities(hw, blk, vsig,
4312 status = ice_upd_prof_hw(hw, blk, &chg);
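
Every match above follows the same pattern: per-block state lives in hw->blk[blk], an array indexed by the enum ice_block value, whose members (xlt1, xlt2, prof, prof_redir, es, masks, prof_id) hold that block's XLT1 ptype-to-PTG table, XLT2 VSI-to-VSIG table, profile TCAM, extraction sequences and field masks. The sketch below is a minimal, self-contained model of that lookup pattern only; the names and sizes used here (hw_model, xlt1_model, MODEL_PTYPE_CNT, ptg_find_ptype, ptg_assign_ptype) are illustrative assumptions, not the driver's real definitions.

/* Minimal model of the hw->blk[blk] access pattern seen in the matches
 * above. All types and sizes here are simplified stand-ins for the real
 * ice driver structures. */
#include <stdio.h>
#include <stdint.h>

enum ice_block { ICE_BLK_SW, ICE_BLK_FD, ICE_BLK_RSS, ICE_BLK_COUNT };

#define MODEL_PTYPE_CNT 16	/* the real XLT1 covers far more ptypes */

struct ptype_model {
	uint8_t ptg;		/* packet type group this ptype belongs to */
};

struct xlt1_model {
	struct ptype_model ptypes[MODEL_PTYPE_CNT];	/* software shadow */
	uint8_t t[MODEL_PTYPE_CNT];	/* table image written to hardware */
	uint16_t count;
};

struct blk_model {
	struct xlt1_model xlt1;
};

struct hw_model {
	struct blk_model blk[ICE_BLK_COUNT];	/* one entry per ice_block */
};

/* Shaped like ice_ptg_find_ptype() (line 663 above): bounds-check the
 * ptype, then read its PTG from the per-block XLT1 shadow. */
static int ptg_find_ptype(struct hw_model *hw, enum ice_block blk,
			  uint16_t ptype, uint8_t *ptg)
{
	if (ptype >= hw->blk[blk].xlt1.count)
		return -1;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return 0;
}

/* Shaped like the assignments at lines 777-778 above: update both the
 * software shadow and the table image that would be flushed to hardware. */
static void ptg_assign_ptype(struct hw_model *hw, enum ice_block blk,
			     uint16_t ptype, uint8_t ptg)
{
	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
	hw->blk[blk].xlt1.t[ptype] = ptg;
}

int main(void)
{
	struct hw_model hw = { 0 };
	uint8_t ptg;

	hw.blk[ICE_BLK_RSS].xlt1.count = MODEL_PTYPE_CNT;
	ptg_assign_ptype(&hw, ICE_BLK_RSS, 5, 3);	/* ptype 5 -> PTG 3 */

	if (!ptg_find_ptype(&hw, ICE_BLK_RSS, 5, &ptg))
		printf("ptype 5 maps to PTG %u\n", ptg);
	return 0;
}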