Lines Matching +full:3 +full:- +full:tuples — search hits from the Hisilicon HNS3 PF driver (hclge_main.c). Each hit is shown with its original source line number and, where available, the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
326 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
327 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
328 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
329 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
330 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
331 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
332 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
333 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
334 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
335 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
336 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
337 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
338 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
339 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
340 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
341 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
343 offsetof(struct hclge_fd_rule, tuples.dst_mac),
346 offsetof(struct hclge_fd_rule, tuples.src_mac),
349 offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
351 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
353 offsetof(struct hclge_fd_rule, tuples.ether_proto),
356 offsetof(struct hclge_fd_rule, tuples.l2_user_def),
359 offsetof(struct hclge_fd_rule, tuples.ip_tos),
362 offsetof(struct hclge_fd_rule, tuples.ip_proto),
365 offsetof(struct hclge_fd_rule, tuples.src_ip),
368 offsetof(struct hclge_fd_rule, tuples.dst_ip),
371 offsetof(struct hclge_fd_rule, tuples.l3_user_def),
374 offsetof(struct hclge_fd_rule, tuples.src_port),
377 offsetof(struct hclge_fd_rule, tuples.dst_port),
380 offsetof(struct hclge_fd_rule, tuples.l4_user_def),
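The fragments above (original lines 326-380) come from the driver's flow-director key layout tables: each entry pairs a tuple identifier with its width in bits, a KEY_OPT_* encoding hint, and offsetof() positions of the matching value inside struct hclge_fd_rule (-1 for meta-data fields that have no rule storage). As a rough illustration of how such a table can drive key assembly, the helper below is hypothetical and is not the driver's actual conversion routine.

/* Hypothetical sketch (not driver code): walk a key-layout table and
 * append each field's value bytes to the hardware lookup key, using the
 * offset recorded with offsetof() in the table entry.
 */
struct example_key_info {
        int tuple;          /* e.g. INNER_SRC_IP */
        u32 key_length;     /* field width in bits */
        int key_opt;        /* KEY_OPT_U8 / KEY_OPT_LE16 / KEY_OPT_IP / ... */
        int offset;         /* offsetof(struct hclge_fd_rule, tuples.xxx), or -1 */
};

static void example_build_key(const struct example_key_info *info, int n,
                              const struct hclge_fd_rule *rule, u8 *key)
{
        int i;

        for (i = 0; i < n; i++) {
                if (info[i].offset < 0)
                        continue;       /* meta-data field, nothing stored in the rule */
                memcpy(key, (const u8 *)rule + info[i].offset,
                       info[i].key_length / 8);
                key += info[i].key_length / 8;
        }
}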
385 * hclge_cmd_send - send command to command queue
395 return hclge_comm_cmd_send(&hw->hw, desc, num); in hclge_cmd_send()
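hclge_cmd_send() (lines 385-395) is a thin wrapper around the shared hclge_comm_cmd_send(). Most callers in this file follow the same round trip: build one or more descriptors, send them, then parse the little-endian response data. A minimal sketch of that pattern, assuming the driver's usual hclge_cmd_setup_basic_desc() helper; the response field read here is only a placeholder:

/* Illustrative only: the typical descriptor round trip used throughout
 * this file.
 */
static int example_query(struct hclge_dev *hdev, u32 *out)
{
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        *out = le32_to_cpu(desc.data[0]);       /* placeholder response field */
        return 0;
}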
420 if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) in hclge_trace_cmd_get()
444 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_defective()
452 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); in hclge_mac_update_stats_defective()
454 dev_err(&hdev->pdev->dev, in hclge_mac_update_stats_defective()
460 /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */ in hclge_mac_update_stats_defective()
461 data_size = sizeof(desc) / (sizeof(u64)) - 1; in hclge_mac_update_stats_defective()
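A quick sanity check of the arithmetic on line 461, assuming HCLGE_MAC_CMD_NUM is 21 and struct hclge_desc is 32 bytes (an 8-byte header plus 24 bytes of data); both values are assumptions taken from the driver headers, not from the listing itself:

/*   sizeof(desc)      = 21 * 32  = 672 bytes   (desc is an array of descriptors)
 *   672 / sizeof(u64) = 84       64-bit slots
 *   84 - 1            = 83       usable counter slots
 *
 * One slot is subtracted because, for this statistics opcode, only the
 * first descriptor carries a 64-bit header; the remaining descriptors
 * are treated as raw counter data.
 */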
480 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; in hclge_mac_update_stats_complete()
481 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_complete()
489 /* The first desc has a 64-bit header, so it needs to be taken into account */ in hclge_mac_update_stats_complete()
497 return -ENOMEM; in hclge_mac_update_stats_complete()
500 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); in hclge_mac_update_stats_complete()
506 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); in hclge_mac_update_stats_complete()
533 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { in hclge_mac_query_reg_num()
539 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_mac_query_reg_num()
541 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
549 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
551 return -ENODATA; in hclge_mac_query_reg_num()
560 if (hdev->ae_dev->dev_specs.mac_stats_num) in hclge_mac_update_stats()
574 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_count()
588 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_stats()
591 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); in hclge_comm_get_stats()
608 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_strings()
620 handle = &hdev->vport[0].nic; in hclge_update_stats_for_all()
621 if (handle->client) { in hclge_update_stats_for_all()
622 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats_for_all()
624 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
634 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
641 struct hclge_dev *hdev = vport->back; in hclge_update_stats()
644 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) in hclge_update_stats()
649 dev_err(&hdev->pdev->dev, in hclge_update_stats()
653 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats()
655 dev_err(&hdev->pdev->dev, in hclge_update_stats()
659 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); in hclge_update_stats()
671 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count()
681 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); in hclge_get_sset_count()
682 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || in hclge_get_sset_count()
683 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || in hclge_get_sset_count()
684 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || in hclge_get_sset_count()
685 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { in hclge_get_sset_count()
687 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; in hclge_get_sset_count()
690 if (hdev->ae_dev->dev_specs.hilink_version != in hclge_get_sset_count()
693 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; in hclge_get_sset_count()
697 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; in hclge_get_sset_count()
699 handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; in hclge_get_sset_count()
701 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && in hclge_get_sset_count()
702 hdev->hw.mac.phydev->drv->set_loopback) || in hclge_get_sset_count()
705 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; in hclge_get_sset_count()
720 struct hclge_dev *hdev = vport->back; in hclge_get_strings()
730 if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { in hclge_get_strings()
734 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { in hclge_get_strings()
738 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { in hclge_get_strings()
742 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { in hclge_get_strings()
746 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { in hclge_get_strings()
756 struct hclge_dev *hdev = vport->back; in hclge_get_stats()
768 struct hclge_dev *hdev = vport->back; in hclge_get_mac_stat()
772 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; in hclge_get_mac_stat()
773 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; in hclge_get_mac_stat()
781 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) in hclge_parse_func_status()
782 return -EINVAL; in hclge_parse_func_status()
785 if (status->pf_state & HCLGE_PF_STATE_MAIN) in hclge_parse_func_status()
786 hdev->flag |= HCLGE_FLAG_MAIN; in hclge_parse_func_status()
788 hdev->flag &= ~HCLGE_FLAG_MAIN; in hclge_parse_func_status()
790 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; in hclge_parse_func_status()
807 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_function_status()
809 dev_err(&hdev->pdev->dev, in hclge_query_function_status()
815 if (req->pf_state) in hclge_query_function_status()
830 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_pf_resource()
832 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
838 hdev->num_tqps = le16_to_cpu(req->tqp_num) + in hclge_query_pf_resource()
839 le16_to_cpu(req->ext_tqp_num); in hclge_query_pf_resource()
840 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
842 if (req->tx_buf_size) in hclge_query_pf_resource()
843 hdev->tx_buf_size = in hclge_query_pf_resource()
844 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
846 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; in hclge_query_pf_resource()
848 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
850 if (req->dv_buf_size) in hclge_query_pf_resource()
851 hdev->dv_buf_size = in hclge_query_pf_resource()
852 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
854 hdev->dv_buf_size = HCLGE_DEFAULT_DV; in hclge_query_pf_resource()
856 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
858 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); in hclge_query_pf_resource()
859 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { in hclge_query_pf_resource()
860 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
862 hdev->num_nic_msi); in hclge_query_pf_resource()
863 return -EINVAL; in hclge_query_pf_resource()
867 hdev->num_roce_msi = in hclge_query_pf_resource()
868 le16_to_cpu(req->pf_intr_vector_number_roce); in hclge_query_pf_resource()
873 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; in hclge_query_pf_resource()
875 hdev->num_msi = hdev->num_nic_msi; in hclge_query_pf_resource()
912 return -EINVAL; in hclge_parse_speed()
941 return -EINVAL; in hclge_get_speed_bit()
947 struct hclge_dev *hdev = vport->back; in hclge_check_port_speed()
948 u32 speed_ability = hdev->hw.mac.speed_ability; in hclge_check_port_speed()
959 return -EINVAL; in hclge_check_port_speed()
964 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); in hclge_update_fec_support()
965 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); in hclge_update_fec_support()
966 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); in hclge_update_fec_support()
967 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_update_fec_support()
969 if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) in hclge_update_fec_support()
971 mac->supported); in hclge_update_fec_support()
972 if (mac->fec_ability & BIT(HNAE3_FEC_RS)) in hclge_update_fec_support()
974 mac->supported); in hclge_update_fec_support()
975 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) in hclge_update_fec_support()
977 mac->supported); in hclge_update_fec_support()
978 if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) in hclge_update_fec_support()
980 mac->supported); in hclge_update_fec_support()
1088 if (mac->fec_ability) in hclge_convert_setting_fec()
1091 switch (mac->speed) { in hclge_convert_setting_fec()
1094 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | in hclge_convert_setting_fec()
1099 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | in hclge_convert_setting_fec()
1103 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | in hclge_convert_setting_fec()
1107 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | in hclge_convert_setting_fec()
1111 mac->fec_ability = 0; in hclge_convert_setting_fec()
1122 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_fiber_link_mode()
1126 mac->supported); in hclge_parse_fiber_link_mode()
1128 hclge_convert_setting_sr(speed_ability, mac->supported); in hclge_parse_fiber_link_mode()
1129 hclge_convert_setting_lr(speed_ability, mac->supported); in hclge_parse_fiber_link_mode()
1130 hclge_convert_setting_cr(speed_ability, mac->supported); in hclge_parse_fiber_link_mode()
1135 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1137 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1138 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1144 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_backplane_link_mode()
1146 hclge_convert_setting_kr(speed_ability, mac->supported); in hclge_parse_backplane_link_mode()
1151 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1153 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1154 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1160 unsigned long *supported = hdev->hw.mac.supported; in hclge_parse_copper_link_mode()
1193 u8 media_type = hdev->hw.mac.media_type; in hclge_parse_link_mode()
1249 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), in hclge_parse_cfg()
1251 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), in hclge_parse_cfg()
1255 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1258 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1261 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1265 mac_addr_tmp = __le32_to_cpu(req->param[2]); in hclge_parse_cfg()
1266 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1272 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1275 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1280 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; in hclge_parse_cfg()
1283 cfg->numa_node_map = __le32_to_cpu(req->param[0]); in hclge_parse_cfg()
1285 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1288 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1291 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT; in hclge_parse_cfg()
1293 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1297 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1301 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), in hclge_parse_cfg()
1312 cfg->pf_rss_size_max = cfg->pf_rss_size_max ? in hclge_parse_cfg()
1313 1U << cfg->pf_rss_size_max : in hclge_parse_cfg()
1314 cfg->vf_rss_size_max; in hclge_parse_cfg()
1320 cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]), in hclge_parse_cfg()
1323 cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT; in hclge_parse_cfg()
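hclge_parse_cfg() (lines 1249-1323) extracts packed fields from little-endian config words with hnae3_get_field(). Assuming the usual mask-and-shift definition of that macro, the extraction is equivalent to the sketch below; the bit positions used are made up for illustration:

/* Illustrative only: hnae3_get_field(origin, mask, shift) behaves like
 * (origin & mask) >> shift, so pulling a hypothetical 8-bit field out of
 * bits 8..15 of a config word would look roughly like this.
 */
static u8 example_extract_field(const struct hclge_cfg_param_cmd *req)
{
        u32 word = __le32_to_cpu(req->param[1]);

        return (word & GENMASK(15, 8)) >> 8;
}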
1348 req->offset = cpu_to_le32(offset); in hclge_get_cfg()
1351 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); in hclge_get_cfg()
1353 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); in hclge_get_cfg()
1366 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_set_default_dev_specs()
1368 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; in hclge_set_default_dev_specs()
1369 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; in hclge_set_default_dev_specs()
1370 ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; in hclge_set_default_dev_specs()
1371 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; in hclge_set_default_dev_specs()
1372 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; in hclge_set_default_dev_specs()
1373 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; in hclge_set_default_dev_specs()
1374 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; in hclge_set_default_dev_specs()
1375 ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; in hclge_set_default_dev_specs()
1376 ae_dev->dev_specs.tnl_num = 0; in hclge_set_default_dev_specs()
1382 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_parse_dev_specs()
1389 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; in hclge_parse_dev_specs()
1390 ae_dev->dev_specs.rss_ind_tbl_size = in hclge_parse_dev_specs()
1391 le16_to_cpu(req0->rss_ind_tbl_size); in hclge_parse_dev_specs()
1392 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); in hclge_parse_dev_specs()
1393 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); in hclge_parse_dev_specs()
1394 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); in hclge_parse_dev_specs()
1395 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); in hclge_parse_dev_specs()
1396 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); in hclge_parse_dev_specs()
1397 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); in hclge_parse_dev_specs()
1398 ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); in hclge_parse_dev_specs()
1399 ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); in hclge_parse_dev_specs()
1400 ae_dev->dev_specs.tnl_num = req1->tnl_num; in hclge_parse_dev_specs()
1401 ae_dev->dev_specs.hilink_version = req1->hilink_version; in hclge_parse_dev_specs()
1406 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; in hclge_check_dev_specs()
1408 if (!dev_specs->max_non_tso_bd_num) in hclge_check_dev_specs()
1409 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; in hclge_check_dev_specs()
1410 if (!dev_specs->rss_ind_tbl_size) in hclge_check_dev_specs()
1411 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; in hclge_check_dev_specs()
1412 if (!dev_specs->rss_key_size) in hclge_check_dev_specs()
1413 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; in hclge_check_dev_specs()
1414 if (!dev_specs->max_tm_rate) in hclge_check_dev_specs()
1415 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; in hclge_check_dev_specs()
1416 if (!dev_specs->max_qset_num) in hclge_check_dev_specs()
1417 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; in hclge_check_dev_specs()
1418 if (!dev_specs->max_int_gl) in hclge_check_dev_specs()
1419 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; in hclge_check_dev_specs()
1420 if (!dev_specs->max_frm_size) in hclge_check_dev_specs()
1421 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; in hclge_check_dev_specs()
1422 if (!dev_specs->umv_size) in hclge_check_dev_specs()
1423 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; in hclge_check_dev_specs()
1432 if (ret && ret != -EOPNOTSUPP) in hclge_query_mac_stats_num()
1435 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; in hclge_query_mac_stats_num()
1452 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_query_dev_specs()
1457 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { in hclge_query_dev_specs()
1464 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); in hclge_query_dev_specs()
1480 dev_err(&hdev->pdev->dev, in hclge_get_cap()
1497 dev_info(&hdev->pdev->dev, in hclge_init_kdump_kernel_config()
1501 hdev->num_tqps = hdev->num_req_vfs + 1; in hclge_init_kdump_kernel_config()
1502 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; in hclge_init_kdump_kernel_config()
1503 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; in hclge_init_kdump_kernel_config()
1510 if (hdev->tc_max > HNAE3_MAX_TC || in hclge_init_tc_config()
1511 hdev->tc_max < 1) { in hclge_init_tc_config()
1512 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", in hclge_init_tc_config()
1513 hdev->tc_max); in hclge_init_tc_config()
1514 hdev->tc_max = 1; in hclge_init_tc_config()
1519 hdev->tc_max = 1; in hclge_init_tc_config()
1520 hdev->pfc_max = 0; in hclge_init_tc_config()
1522 hdev->pfc_max = hdev->tc_max; in hclge_init_tc_config()
1525 hdev->tm_info.num_tc = 1; in hclge_init_tc_config()
1528 for (i = 0; i < hdev->tm_info.num_tc; i++) in hclge_init_tc_config()
1529 hnae3_set_bit(hdev->hw_tc_map, i, 1); in hclge_init_tc_config()
1531 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; in hclge_init_tc_config()
1536 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_configure()
1544 hdev->base_tqp_pid = 0; in hclge_configure()
1545 hdev->vf_rss_size_max = cfg.vf_rss_size_max; in hclge_configure()
1546 hdev->pf_rss_size_max = cfg.pf_rss_size_max; in hclge_configure()
1547 hdev->rx_buf_len = cfg.rx_buf_len; in hclge_configure()
1548 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); in hclge_configure()
1549 hdev->hw.mac.media_type = cfg.media_type; in hclge_configure()
1550 hdev->hw.mac.phy_addr = cfg.phy_addr; in hclge_configure()
1551 hdev->num_tx_desc = cfg.tqp_desc_num; in hclge_configure()
1552 hdev->num_rx_desc = cfg.tqp_desc_num; in hclge_configure()
1553 hdev->tm_info.num_pg = 1; in hclge_configure()
1554 hdev->tc_max = cfg.tc_num; in hclge_configure()
1555 hdev->tm_info.hw_pfc_map = 0; in hclge_configure()
1557 hdev->wanted_umv_size = cfg.umv_space; in hclge_configure()
1559 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; in hclge_configure()
1560 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; in hclge_configure()
1561 hdev->gro_en = true; in hclge_configure()
1563 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); in hclge_configure()
1565 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_configure()
1566 hdev->fd_en = true; in hclge_configure()
1567 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_configure()
1570 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); in hclge_configure()
1572 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", in hclge_configure()
1576 hdev->hw.mac.req_speed = hdev->hw.mac.speed; in hclge_configure()
1577 hdev->hw.mac.req_autoneg = AUTONEG_ENABLE; in hclge_configure()
1578 hdev->hw.mac.req_duplex = DUPLEX_FULL; in hclge_configure()
1582 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); in hclge_configure()
1599 req->tso_mss_min = cpu_to_le16(tso_mss_min); in hclge_config_tso()
1600 req->tso_mss_max = cpu_to_le16(tso_mss_max); in hclge_config_tso()
1602 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_tso()
1611 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) in hclge_config_gro()
1617 req->gro_en = hdev->gro_en ? 1 : 0; in hclge_config_gro()
1619 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_gro()
1621 dev_err(&hdev->pdev->dev, in hclge_config_gro()
1629 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_alloc_tqps()
1633 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclge_alloc_tqps()
1635 if (!hdev->htqp) in hclge_alloc_tqps()
1636 return -ENOMEM; in hclge_alloc_tqps()
1638 tqp = hdev->htqp; in hclge_alloc_tqps()
1640 for (i = 0; i < hdev->num_tqps; i++) { in hclge_alloc_tqps()
1641 tqp->dev = &hdev->pdev->dev; in hclge_alloc_tqps()
1642 tqp->index = i; in hclge_alloc_tqps()
1644 tqp->q.ae_algo = &ae_algo; in hclge_alloc_tqps()
1645 tqp->q.buf_size = hdev->rx_buf_len; in hclge_alloc_tqps()
1646 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclge_alloc_tqps()
1647 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclge_alloc_tqps()
1653 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1657 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1660 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * in hclge_alloc_tqps()
1667 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) in hclge_alloc_tqps()
1668 tqp->q.mem_base = hdev->hw.hw.mem_base + in hclge_alloc_tqps()
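The io_base assignments in hclge_alloc_tqps() (lines 1653-1660) split the queues into the original register window and an extended window for queues beyond HCLGE_TQP_MAX_SIZE_DEV_V2. As a worked example, assuming HCLGE_TQP_MAX_SIZE_DEV_V2 == 1024, HCLGE_TQP_REG_OFFSET == 0x80000, HCLGE_TQP_EXT_REG_OFFSET == 0x30000 and HCLGE_TQP_REG_SIZE == 0x200 (values assumed from the driver headers, not shown in the listing):

/*   queue 10:    io_base + 0x80000 + 10 * 0x200
 *   queue 1025:  io_base + 0x80000 + 0x30000 + (1025 - 1024) * 0x200
 *
 * i.e. the first 1024 queues use a 0x200-byte register stride inside the
 * base window, and every later queue uses the same stride inside the
 * extended window that starts 0x30000 bytes further in.
 */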
1687 req->tqp_id = cpu_to_le16(tqp_pid); in hclge_map_tqps_to_func()
1688 req->tqp_vf = func_id; in hclge_map_tqps_to_func()
1689 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; in hclge_map_tqps_to_func()
1691 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; in hclge_map_tqps_to_func()
1692 req->tqp_vid = cpu_to_le16(tqp_vid); in hclge_map_tqps_to_func()
1694 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_map_tqps_to_func()
1696 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); in hclge_map_tqps_to_func()
1703 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_assign_tqp()
1704 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp()
1707 for (i = 0, alloced = 0; i < hdev->num_tqps && in hclge_assign_tqp()
1709 if (!hdev->htqp[i].alloced) { in hclge_assign_tqp()
1710 hdev->htqp[i].q.handle = &vport->nic; in hclge_assign_tqp()
1711 hdev->htqp[i].q.tqp_index = alloced; in hclge_assign_tqp()
1712 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; in hclge_assign_tqp()
1713 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; in hclge_assign_tqp()
1714 kinfo->tqp[alloced] = &hdev->htqp[i].q; in hclge_assign_tqp()
1715 hdev->htqp[i].alloced = true; in hclge_assign_tqp()
1719 vport->alloc_tqps = alloced; in hclge_assign_tqp()
1720 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, in hclge_assign_tqp()
1721 vport->alloc_tqps / hdev->tm_info.num_tc); in hclge_assign_tqp()
1724 kinfo->rss_size = min_t(u16, kinfo->rss_size, in hclge_assign_tqp()
1725 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); in hclge_assign_tqp()
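The rss_size computation in hclge_assign_tqp() (lines 1720-1725) limits each vport's RSS queue count twice: first by the per-PF maximum and the queues actually allocated per TC, then by the NIC MSI-X budget (one vector is held back). With illustrative numbers, not taken from any real device:

/*   alloc_tqps = 16, num_tc = 4, pf_rss_size_max = 64, num_nic_msi = 9
 *
 *   rss_size = min(64, 16 / 4)      = 4
 *   rss_size = min(4, (9 - 1) / 4)  = 2
 *
 * so each TC ends up with 2 RSS queues, bounded by the interrupt budget
 * rather than by the queue count.
 */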
1734 struct hnae3_handle *nic = &vport->nic; in hclge_knic_setup()
1735 struct hnae3_knic_private_info *kinfo = &nic->kinfo; in hclge_knic_setup()
1736 struct hclge_dev *hdev = vport->back; in hclge_knic_setup()
1739 kinfo->num_tx_desc = num_tx_desc; in hclge_knic_setup()
1740 kinfo->num_rx_desc = num_rx_desc; in hclge_knic_setup()
1742 kinfo->rx_buf_len = hdev->rx_buf_len; in hclge_knic_setup()
1743 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; in hclge_knic_setup()
1745 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, in hclge_knic_setup()
1747 if (!kinfo->tqp) in hclge_knic_setup()
1748 return -ENOMEM; in hclge_knic_setup()
1752 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); in hclge_knic_setup()
1760 struct hnae3_handle *nic = &vport->nic; in hclge_map_tqp_to_vport()
1764 kinfo = &nic->kinfo; in hclge_map_tqp_to_vport()
1765 for (i = 0; i < vport->alloc_tqps; i++) { in hclge_map_tqp_to_vport()
1767 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); in hclge_map_tqp_to_vport()
1771 is_pf = !(vport->vport_id); in hclge_map_tqp_to_vport()
1772 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, in hclge_map_tqp_to_vport()
1783 struct hclge_vport *vport = hdev->vport; in hclge_map_tqp()
1786 num_vport = hdev->num_req_vfs + 1; in hclge_map_tqp()
1802 struct hnae3_handle *nic = &vport->nic; in hclge_vport_setup()
1803 struct hclge_dev *hdev = vport->back; in hclge_vport_setup()
1806 nic->pdev = hdev->pdev; in hclge_vport_setup()
1807 nic->ae_algo = &ae_algo; in hclge_vport_setup()
1808 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, in hclge_vport_setup()
1810 nic->kinfo.io_base = hdev->hw.hw.io_base; in hclge_vport_setup()
1813 hdev->num_tx_desc, hdev->num_rx_desc); in hclge_vport_setup()
1815 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); in hclge_vport_setup()
1822 struct pci_dev *pdev = hdev->pdev; in hclge_alloc_vport()
1830 num_vport = hdev->num_req_vfs + 1; in hclge_alloc_vport()
1832 if (hdev->num_tqps < num_vport) { in hclge_alloc_vport()
1833 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", in hclge_alloc_vport()
1834 hdev->num_tqps, num_vport); in hclge_alloc_vport()
1835 return -EINVAL; in hclge_alloc_vport()
1839 tqp_per_vport = hdev->num_tqps / num_vport; in hclge_alloc_vport()
1840 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; in hclge_alloc_vport()
1842 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), in hclge_alloc_vport()
1845 return -ENOMEM; in hclge_alloc_vport()
1847 hdev->vport = vport; in hclge_alloc_vport()
1848 hdev->num_alloc_vport = num_vport; in hclge_alloc_vport()
1851 hdev->num_alloc_vfs = hdev->num_req_vfs; in hclge_alloc_vport()
1854 vport->back = hdev; in hclge_alloc_vport()
1855 vport->vport_id = i; in hclge_alloc_vport()
1856 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; in hclge_alloc_vport()
1857 vport->mps = HCLGE_MAC_DEFAULT_FRAME; in hclge_alloc_vport()
1858 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; in hclge_alloc_vport()
1859 vport->port_base_vlan_cfg.tbl_sta = true; in hclge_alloc_vport()
1860 vport->rxvlan_cfg.rx_vlan_offload_en = true; in hclge_alloc_vport()
1861 vport->req_vlan_fltr_en = true; in hclge_alloc_vport()
1862 INIT_LIST_HEAD(&vport->vlan_list); in hclge_alloc_vport()
1863 INIT_LIST_HEAD(&vport->uc_mac_list); in hclge_alloc_vport()
1864 INIT_LIST_HEAD(&vport->mc_mac_list); in hclge_alloc_vport()
1865 spin_lock_init(&vport->mac_list_lock); in hclge_alloc_vport()
1872 dev_err(&pdev->dev, in hclge_alloc_vport()
1899 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; in hclge_cmd_alloc_tx_buff()
1901 req->tx_pkt_buff[i] = in hclge_cmd_alloc_tx_buff()
1906 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_alloc_tx_buff()
1908 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", in hclge_cmd_alloc_tx_buff()
1920 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); in hclge_tx_buffer_alloc()
1931 if (hdev->hw_tc_map & BIT(i)) in hclge_get_tc_num()
1945 priv = &buf_alloc->priv_buf[i]; in hclge_get_pfc_priv_num()
1946 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_pfc_priv_num()
1947 priv->enable) in hclge_get_pfc_priv_num()
1963 priv = &buf_alloc->priv_buf[i]; in hclge_get_no_pfc_priv_num()
1964 if (hdev->hw_tc_map & BIT(i) && in hclge_get_no_pfc_priv_num()
1965 !(hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_no_pfc_priv_num()
1966 priv->enable) in hclge_get_no_pfc_priv_num()
1980 priv = &buf_alloc->priv_buf[i]; in hclge_get_rx_priv_buff_alloced()
1981 if (priv->enable) in hclge_get_rx_priv_buff_alloced()
1982 rx_priv += priv->buf_size; in hclge_get_rx_priv_buff_alloced()
1992 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; in hclge_get_tx_buff_alloced()
2007 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
2011 hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2014 + hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2024 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
2025 buf_alloc->s_buf.buf_size = shared_buf; in hclge_is_rx_buf_ok()
2027 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2028 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high in hclge_is_rx_buf_ok()
2029 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, in hclge_is_rx_buf_ok()
2032 buf_alloc->s_buf.self.high = aligned_mps + in hclge_is_rx_buf_ok()
2034 buf_alloc->s_buf.self.low = aligned_mps; in hclge_is_rx_buf_ok()
2038 hi_thrd = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2049 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; in hclge_is_rx_buf_ok()
2056 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; in hclge_is_rx_buf_ok()
2057 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; in hclge_is_rx_buf_ok()
2068 total_size = hdev->pkt_buf_size; in hclge_tx_buffer_calc()
2072 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_tx_buffer_calc()
2074 if (hdev->hw_tc_map & BIT(i)) { in hclge_tx_buffer_calc()
2075 if (total_size < hdev->tx_buf_size) in hclge_tx_buffer_calc()
2076 return -ENOMEM; in hclge_tx_buffer_calc()
2078 priv->tx_buf_size = hdev->tx_buf_size; in hclge_tx_buffer_calc()
2080 priv->tx_buf_size = 0; in hclge_tx_buffer_calc()
2083 total_size -= priv->tx_buf_size; in hclge_tx_buffer_calc()
2092 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buf_calc_all()
2093 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_rx_buf_calc_all()
2097 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_rx_buf_calc_all()
2099 priv->enable = 0; in hclge_rx_buf_calc_all()
2100 priv->wl.low = 0; in hclge_rx_buf_calc_all()
2101 priv->wl.high = 0; in hclge_rx_buf_calc_all()
2102 priv->buf_size = 0; in hclge_rx_buf_calc_all()
2104 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rx_buf_calc_all()
2107 priv->enable = 1; in hclge_rx_buf_calc_all()
2109 if (hdev->tm_info.hw_pfc_map & BIT(i)) { in hclge_rx_buf_calc_all()
2110 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; in hclge_rx_buf_calc_all()
2111 priv->wl.high = roundup(priv->wl.low + aligned_mps, in hclge_rx_buf_calc_all()
2114 priv->wl.low = 0; in hclge_rx_buf_calc_all()
2115 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : in hclge_rx_buf_calc_all()
2119 priv->buf_size = priv->wl.high + hdev->dv_buf_size; in hclge_rx_buf_calc_all()
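The per-TC RX buffer sizing in hclge_rx_buf_calc_all() (lines 2099-2119) derives watermarks from the MPS rounded up to the buffer unit, then adds the dv headroom on top of the high watermark. A worked example with assumed values (mps = 1500, HCLGE_BUF_SIZE_UNIT = 256), for a PFC-enabled TC in the "max" pass:

/*   aligned_mps = roundup(1500, 256)         = 1536
 *   wl.low      = aligned_mps                = 1536
 *   wl.high     = roundup(1536 + 1536, 256)  = 3072
 *   buf_size    = wl.high + dv_buf_size      = 3072 + dv_buf_size
 *
 * dv_buf_size itself comes from firmware (or HCLGE_DEFAULT_DV) in
 * hclge_query_pf_resource() earlier in the listing.
 */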
2128 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2133 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { in hclge_drop_nopfc_buf_till_fit()
2134 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_drop_nopfc_buf_till_fit()
2137 if (hdev->hw_tc_map & mask && in hclge_drop_nopfc_buf_till_fit()
2138 !(hdev->tm_info.hw_pfc_map & mask)) { in hclge_drop_nopfc_buf_till_fit()
2140 priv->wl.low = 0; in hclge_drop_nopfc_buf_till_fit()
2141 priv->wl.high = 0; in hclge_drop_nopfc_buf_till_fit()
2142 priv->buf_size = 0; in hclge_drop_nopfc_buf_till_fit()
2143 priv->enable = 0; in hclge_drop_nopfc_buf_till_fit()
2144 no_pfc_priv_num--; in hclge_drop_nopfc_buf_till_fit()
2158 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_pfc_buf_till_fit()
2163 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { in hclge_drop_pfc_buf_till_fit()
2164 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_drop_pfc_buf_till_fit()
2167 if (hdev->hw_tc_map & mask && in hclge_drop_pfc_buf_till_fit()
2168 hdev->tm_info.hw_pfc_map & mask) { in hclge_drop_pfc_buf_till_fit()
2170 priv->wl.low = 0; in hclge_drop_pfc_buf_till_fit()
2171 priv->enable = 0; in hclge_drop_pfc_buf_till_fit()
2172 priv->wl.high = 0; in hclge_drop_pfc_buf_till_fit()
2173 priv->buf_size = 0; in hclge_drop_pfc_buf_till_fit()
2174 pfc_priv_num--; in hclge_drop_pfc_buf_till_fit()
2192 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_only_alloc_priv_buff()
2194 u32 half_mps = hdev->mps >> 1; in hclge_only_alloc_priv_buff()
2204 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + in hclge_only_alloc_priv_buff()
2212 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_only_alloc_priv_buff()
2214 priv->enable = 0; in hclge_only_alloc_priv_buff()
2215 priv->wl.low = 0; in hclge_only_alloc_priv_buff()
2216 priv->wl.high = 0; in hclge_only_alloc_priv_buff()
2217 priv->buf_size = 0; in hclge_only_alloc_priv_buff()
2219 if (!(hdev->hw_tc_map & BIT(i))) in hclge_only_alloc_priv_buff()
2222 priv->enable = 1; in hclge_only_alloc_priv_buff()
2223 priv->buf_size = rx_priv; in hclge_only_alloc_priv_buff()
2224 priv->wl.high = rx_priv - hdev->dv_buf_size; in hclge_only_alloc_priv_buff()
2225 priv->wl.low = priv->wl.high - PRIV_WL_GAP; in hclge_only_alloc_priv_buff()
2228 buf_alloc->s_buf.buf_size = 0; in hclge_only_alloc_priv_buff()
2243 u32 rx_all = hdev->pkt_buf_size; in hclge_rx_buffer_calc()
2245 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buffer_calc()
2247 return -ENOMEM; in hclge_rx_buffer_calc()
2268 return -ENOMEM; in hclge_rx_buffer_calc()
2284 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_rx_priv_buf_alloc()
2286 req->buf_num[i] = in hclge_rx_priv_buf_alloc()
2287 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_buf_alloc()
2288 req->buf_num[i] |= in hclge_rx_priv_buf_alloc()
2292 req->shared_buf = in hclge_rx_priv_buf_alloc()
2293 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | in hclge_rx_priv_buf_alloc()
2296 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_rx_priv_buf_alloc()
2298 dev_err(&hdev->pdev->dev, in hclge_rx_priv_buf_alloc()
2327 priv = &buf_alloc->priv_buf[idx]; in hclge_rx_priv_wl_config()
2328 req->tc_wl[j].high = in hclge_rx_priv_wl_config()
2329 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_wl_config()
2330 req->tc_wl[j].high |= in hclge_rx_priv_wl_config()
2332 req->tc_wl[j].low = in hclge_rx_priv_wl_config()
2333 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_wl_config()
2334 req->tc_wl[j].low |= in hclge_rx_priv_wl_config()
2340 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_rx_priv_wl_config()
2342 dev_err(&hdev->pdev->dev, in hclge_rx_priv_wl_config()
2351 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; in hclge_common_thrd_config()
2370 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; in hclge_common_thrd_config()
2372 req->com_thrd[j].high = in hclge_common_thrd_config()
2373 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); in hclge_common_thrd_config()
2374 req->com_thrd[j].high |= in hclge_common_thrd_config()
2376 req->com_thrd[j].low = in hclge_common_thrd_config()
2377 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); in hclge_common_thrd_config()
2378 req->com_thrd[j].low |= in hclge_common_thrd_config()
2384 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_common_thrd_config()
2386 dev_err(&hdev->pdev->dev, in hclge_common_thrd_config()
2394 struct hclge_shared_buf *buf = &buf_alloc->s_buf; in hclge_common_wl_config()
2402 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); in hclge_common_wl_config()
2403 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); in hclge_common_wl_config()
2405 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); in hclge_common_wl_config()
2406 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); in hclge_common_wl_config()
2408 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_common_wl_config()
2410 dev_err(&hdev->pdev->dev, in hclge_common_wl_config()
2423 return -ENOMEM; in hclge_buffer_alloc()
2427 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2434 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2441 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2449 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", in hclge_buffer_alloc()
2457 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2465 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2474 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2484 struct hnae3_handle *roce = &vport->roce; in hclge_init_roce_base_info()
2485 struct hnae3_handle *nic = &vport->nic; in hclge_init_roce_base_info()
2486 struct hclge_dev *hdev = vport->back; in hclge_init_roce_base_info()
2488 roce->rinfo.num_vectors = vport->back->num_roce_msi; in hclge_init_roce_base_info()
2490 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) in hclge_init_roce_base_info()
2491 return -EINVAL; in hclge_init_roce_base_info()
2493 roce->rinfo.base_vector = hdev->num_nic_msi; in hclge_init_roce_base_info()
2495 roce->rinfo.netdev = nic->kinfo.netdev; in hclge_init_roce_base_info()
2496 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; in hclge_init_roce_base_info()
2497 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; in hclge_init_roce_base_info()
2499 roce->pdev = nic->pdev; in hclge_init_roce_base_info()
2500 roce->ae_algo = nic->ae_algo; in hclge_init_roce_base_info()
2501 bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, in hclge_init_roce_base_info()
2509 struct pci_dev *pdev = hdev->pdev; in hclge_init_msi()
2514 hdev->num_msi, in hclge_init_msi()
2517 dev_err(&pdev->dev, in hclge_init_msi()
2518 "failed(%d) to allocate MSI/MSI-X vectors\n", in hclge_init_msi()
2522 if (vectors < hdev->num_msi) in hclge_init_msi()
2523 dev_warn(&hdev->pdev->dev, in hclge_init_msi()
2524 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", in hclge_init_msi()
2525 hdev->num_msi, vectors); in hclge_init_msi()
2527 hdev->num_msi = vectors; in hclge_init_msi()
2528 hdev->num_msi_left = vectors; in hclge_init_msi()
2530 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2532 if (!hdev->vector_status) { in hclge_init_msi()
2534 return -ENOMEM; in hclge_init_msi()
2537 for (i = 0; i < hdev->num_msi; i++) in hclge_init_msi()
2538 hdev->vector_status[i] = HCLGE_INVALID_VPORT; in hclge_init_msi()
2540 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2542 if (!hdev->vector_irq) { in hclge_init_msi()
2544 return -ENOMEM; in hclge_init_msi()
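hclge_init_msi() (lines 2509-2544) accepts fewer vectors than requested, then sizes two bookkeeping arrays per granted vector: vector_status[] records the owning vport (initialised to HCLGE_INVALID_VPORT) and vector_irq[] caches the Linux IRQ number. A hedged sketch of how a free slot would typically be claimed; the helper itself is hypothetical, only pci_irq_vector() and the fields named in the listing are real:

/* Illustrative only: claim one vector from the pool and resolve it to a
 * Linux IRQ number.
 */
static int example_claim_vector(struct hclge_dev *hdev, u16 vport_id)
{
        int i;

        for (i = 0; i < hdev->num_msi; i++) {
                if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
                        hdev->vector_status[i] = vport_id;
                        hdev->vector_irq[i] = pci_irq_vector(hdev->pdev, i);
                        return i;       /* index into the vector pool */
                }
        }
        return -ENOSPC;
}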
2581 return -EINVAL; in hclge_convert_to_fw_speed()
2597 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); in hclge_cfg_mac_speed_dup_hw()
2601 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); in hclge_cfg_mac_speed_dup_hw()
2605 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, in hclge_cfg_mac_speed_dup_hw()
2607 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, in hclge_cfg_mac_speed_dup_hw()
2609 req->lane_num = lane_num; in hclge_cfg_mac_speed_dup_hw()
2611 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_speed_dup_hw()
2613 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_speed_dup_hw()
2623 struct hclge_mac *mac = &hdev->hw.mac; in hclge_cfg_mac_speed_dup()
2627 if (!mac->support_autoneg && mac->speed == speed && in hclge_cfg_mac_speed_dup()
2628 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) in hclge_cfg_mac_speed_dup()
2635 hdev->hw.mac.speed = speed; in hclge_cfg_mac_speed_dup()
2636 hdev->hw.mac.duplex = duplex; in hclge_cfg_mac_speed_dup()
2638 hdev->hw.mac.lane_num = lane_num; in hclge_cfg_mac_speed_dup()
2647 struct hclge_dev *hdev = vport->back; in hclge_cfg_mac_speed_dup_h()
2655 hdev->hw.mac.req_speed = speed; in hclge_cfg_mac_speed_dup_h()
2656 hdev->hw.mac.req_duplex = duplex; in hclge_cfg_mac_speed_dup_h()
2673 req->cfg_an_cmd_flag = cpu_to_le32(flag); in hclge_set_autoneg_en()
2675 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_autoneg_en()
2677 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", in hclge_set_autoneg_en()
2686 struct hclge_dev *hdev = vport->back; in hclge_set_autoneg()
2688 if (!hdev->hw.mac.support_autoneg) { in hclge_set_autoneg()
2690 dev_err(&hdev->pdev->dev, in hclge_set_autoneg()
2692 return -EOPNOTSUPP; in hclge_set_autoneg()
2704 struct hclge_dev *hdev = vport->back; in hclge_get_autoneg()
2705 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_autoneg()
2708 return phydev->autoneg; in hclge_get_autoneg()
2710 return hdev->hw.mac.autoneg; in hclge_get_autoneg()
2716 struct hclge_dev *hdev = vport->back; in hclge_restart_autoneg()
2719 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); in hclge_restart_autoneg()
2730 struct hclge_dev *hdev = vport->back; in hclge_halt_autoneg()
2732 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) in hclge_halt_autoneg()
2755 hdev->fec_stats.per_lanes[i] += in hclge_parse_fec_stats_lanes()
2768 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; in hclge_parse_fec_stats()
2769 hdev->fec_stats.rs_corr_blocks += in hclge_parse_fec_stats()
2770 le32_to_cpu(req->rs_fec_corr_blocks); in hclge_parse_fec_stats()
2771 hdev->fec_stats.rs_uncorr_blocks += in hclge_parse_fec_stats()
2772 le32_to_cpu(req->rs_fec_uncorr_blocks); in hclge_parse_fec_stats()
2773 hdev->fec_stats.rs_error_blocks += in hclge_parse_fec_stats()
2774 le32_to_cpu(req->rs_fec_error_blocks); in hclge_parse_fec_stats()
2775 hdev->fec_stats.base_r_corr_blocks += in hclge_parse_fec_stats()
2776 le32_to_cpu(req->base_r_fec_corr_blocks); in hclge_parse_fec_stats()
2777 hdev->fec_stats.base_r_uncorr_blocks += in hclge_parse_fec_stats()
2778 le32_to_cpu(req->base_r_fec_uncorr_blocks); in hclge_parse_fec_stats()
2780 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); in hclge_parse_fec_stats()
2792 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) in hclge_update_fec_stats_hw()
2796 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2807 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_fec_stats()
2811 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) in hclge_update_fec_stats()
2816 dev_err(&hdev->pdev->dev, in hclge_update_fec_stats()
2819 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); in hclge_update_fec_stats()
2825 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; in hclge_get_fec_stats_total()
2826 fec_stats->uncorrectable_blocks.total = in hclge_get_fec_stats_total()
2827 hdev->fec_stats.rs_uncorr_blocks; in hclge_get_fec_stats_total()
2835 if (hdev->fec_stats.base_r_lane_num == 0 || in hclge_get_fec_stats_lanes()
2836 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { in hclge_get_fec_stats_lanes()
2837 dev_err(&hdev->pdev->dev, in hclge_get_fec_stats_lanes()
2839 hdev->fec_stats.base_r_lane_num); in hclge_get_fec_stats_lanes()
2843 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { in hclge_get_fec_stats_lanes()
2844 fec_stats->corrected_blocks.lanes[i] = in hclge_get_fec_stats_lanes()
2845 hdev->fec_stats.base_r_corr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2846 fec_stats->uncorrectable_blocks.lanes[i] = in hclge_get_fec_stats_lanes()
2847 hdev->fec_stats.base_r_uncorr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2854 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_comm_get_fec_stats()
2865 dev_err(&hdev->pdev->dev, in hclge_comm_get_fec_stats()
2876 struct hclge_dev *hdev = vport->back; in hclge_get_fec_stats()
2877 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_get_fec_stats()
2899 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); in hclge_set_fec_hw()
2901 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2904 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2907 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2910 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fec_hw()
2912 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); in hclge_set_fec_hw()
2920 struct hclge_dev *hdev = vport->back; in hclge_set_fec()
2921 struct hclge_mac *mac = &hdev->hw.mac; in hclge_set_fec()
2924 if (fec_mode && !(mac->fec_ability & fec_mode)) { in hclge_set_fec()
2925 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); in hclge_set_fec()
2926 return -EINVAL; in hclge_set_fec()
2933 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); in hclge_set_fec()
2941 struct hclge_dev *hdev = vport->back; in hclge_get_fec()
2942 struct hclge_mac *mac = &hdev->hw.mac; in hclge_get_fec()
2945 *fec_ability = mac->fec_ability; in hclge_get_fec()
2947 *fec_mode = mac->fec_mode; in hclge_get_fec()
2952 struct hclge_mac *mac = &hdev->hw.mac; in hclge_mac_init()
2955 hdev->support_sfp_query = true; in hclge_mac_init()
2957 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_mac_init()
2958 hdev->hw.mac.duplex = HCLGE_MAC_FULL; in hclge_mac_init()
2960 if (hdev->hw.mac.support_autoneg) { in hclge_mac_init()
2961 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); in hclge_mac_init()
2966 if (!hdev->hw.mac.autoneg) { in hclge_mac_init()
2967 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed, in hclge_mac_init()
2968 hdev->hw.mac.req_duplex, in hclge_mac_init()
2969 hdev->hw.mac.lane_num); in hclge_mac_init()
2974 mac->link = 0; in hclge_mac_init()
2976 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { in hclge_mac_init()
2977 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); in hclge_mac_init()
2982 ret = hclge_set_mac_mtu(hdev, hdev->mps); in hclge_mac_init()
2984 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); in hclge_mac_init()
2994 dev_err(&hdev->pdev->dev, in hclge_mac_init()
3002 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_mbx_task_schedule()
3003 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { in hclge_mbx_task_schedule()
3004 hdev->last_mbx_scheduled = jiffies; in hclge_mbx_task_schedule()
3005 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_mbx_task_schedule()
3011 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_reset_task_schedule()
3012 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && in hclge_reset_task_schedule()
3013 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { in hclge_reset_task_schedule()
3014 hdev->last_rst_scheduled = jiffies; in hclge_reset_task_schedule()
3015 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_reset_task_schedule()
3021 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_errhand_task_schedule()
3022 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_task_schedule()
3023 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_errhand_task_schedule()
3028 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_task_schedule()
3029 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_task_schedule()
3030 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); in hclge_task_schedule()
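The mbx/reset/errhand schedule helpers above (lines 3002-3023) all follow the same guard pattern: do nothing if the device is being torn down, use test_and_set_bit() so only the first caller queues the work, then kick the shared delayed service task. The generic form, as a sketch only:

/* Illustrative only: the schedule-once pattern used by the helpers above. */
static void example_schedule_once(struct hclge_dev *hdev, int state_bit)
{
        if (test_bit(HCLGE_STATE_REMOVING, &hdev->state))
                return;                 /* device is going away */

        if (test_and_set_bit(state_bit, &hdev->state))
                return;                 /* work is already queued */

        mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}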
3040 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_mac_link_status()
3042 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", in hclge_get_mac_link_status()
3048 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? in hclge_get_mac_link_status()
3056 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mac_phy_link()
3060 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) in hclge_get_mac_phy_link()
3063 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) in hclge_get_mac_phy_link()
3075 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { in hclge_push_link_status()
3076 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_push_link_status()
3078 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || in hclge_push_link_status()
3079 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) in hclge_push_link_status()
3084 dev_err(&hdev->pdev->dev, in hclge_push_link_status()
3093 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_update_link_status()
3094 struct hnae3_client *client = hdev->nic_client; in hclge_update_link_status()
3101 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) in hclge_update_link_status()
3106 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3110 if (state != hdev->hw.mac.link) { in hclge_update_link_status()
3111 hdev->hw.mac.link = state; in hclge_update_link_status()
3115 client->ops->link_status_change(handle, state); in hclge_update_link_status()
3118 if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) { in hclge_update_link_status()
3119 struct hnae3_handle *rhandle = &hdev->vport[0].roce; in hclge_update_link_status()
3120 struct hnae3_client *rclient = hdev->roce_client; in hclge_update_link_status()
3122 if (rclient && rclient->ops->link_status_change) in hclge_update_link_status()
3123 rclient->ops->link_status_change(rhandle, in hclge_update_link_status()
3130 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3137 if (hclge_get_speed_bit(mac->speed, &speed_ability)) in hclge_update_speed_advertising()
3140 switch (mac->module_type) { in hclge_update_speed_advertising()
3142 hclge_convert_setting_lr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3146 hclge_convert_setting_sr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3149 hclge_convert_setting_cr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3152 hclge_convert_setting_kr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3161 if (mac->fec_mode & BIT(HNAE3_FEC_RS)) in hclge_update_fec_advertising()
3163 mac->advertising); in hclge_update_fec_advertising()
3164 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) in hclge_update_fec_advertising()
3166 mac->advertising); in hclge_update_fec_advertising()
3167 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) in hclge_update_fec_advertising()
3169 mac->advertising); in hclge_update_fec_advertising()
3172 mac->advertising); in hclge_update_fec_advertising()
3177 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_pause_advertising()
3180 switch (hdev->fc_mode_last_time) { in hclge_update_pause_advertising()
3199 linkmode_set_pause(mac->advertising, tx_en, rx_en); in hclge_update_pause_advertising()
3204 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_advertising()
3206 linkmode_zero(mac->advertising); in hclge_update_advertising()
3221 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && in hclge_update_port_capability()
3222 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) in hclge_update_port_capability()
3223 mac->module_type = HNAE3_MODULE_TYPE_KR; in hclge_update_port_capability()
3224 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) in hclge_update_port_capability()
3225 mac->module_type = HNAE3_MODULE_TYPE_TP; in hclge_update_port_capability()
3227 if (mac->support_autoneg) { in hclge_update_port_capability()
3228 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); in hclge_update_port_capability()
3229 linkmode_copy(mac->advertising, mac->supported); in hclge_update_port_capability()
3232 mac->supported); in hclge_update_port_capability()
3245 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_speed()
3246 if (ret == -EOPNOTSUPP) { in hclge_get_sfp_speed()
3247 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_speed()
3251 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); in hclge_get_sfp_speed()
3255 *speed = le32_to_cpu(resp->speed); in hclge_get_sfp_speed()
3269 resp->query_type = QUERY_ACTIVE_SPEED; in hclge_get_sfp_info()
3271 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_info()
3272 if (ret == -EOPNOTSUPP) { in hclge_get_sfp_info()
3273 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_info()
3277 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); in hclge_get_sfp_info()
3282 * set to mac->speed. in hclge_get_sfp_info()
3284 if (!le32_to_cpu(resp->speed)) in hclge_get_sfp_info()
3287 mac->speed = le32_to_cpu(resp->speed); in hclge_get_sfp_info()
3288 /* if resp->speed_ability is 0, it means it's an old version in hclge_get_sfp_info()
3291 if (resp->speed_ability) { in hclge_get_sfp_info()
3292 mac->module_type = le32_to_cpu(resp->module_type); in hclge_get_sfp_info()
3293 mac->speed_ability = le32_to_cpu(resp->speed_ability); in hclge_get_sfp_info()
3294 mac->autoneg = resp->autoneg; in hclge_get_sfp_info()
3295 mac->support_autoneg = resp->autoneg_ability; in hclge_get_sfp_info()
3296 mac->speed_type = QUERY_ACTIVE_SPEED; in hclge_get_sfp_info()
3297 mac->lane_num = resp->lane_num; in hclge_get_sfp_info()
3298 if (!resp->active_fec) in hclge_get_sfp_info()
3299 mac->fec_mode = 0; in hclge_get_sfp_info()
3301 mac->fec_mode = BIT(resp->active_fec); in hclge_get_sfp_info()
3302 mac->fec_ability = resp->fec_ability; in hclge_get_sfp_info()
3304 mac->speed_type = QUERY_SFP_SPEED; in hclge_get_sfp_info()
3318 struct hclge_dev *hdev = vport->back; in hclge_get_phy_link_ksettings()
3327 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_get_phy_link_ksettings()
3329 dev_err(&hdev->pdev->dev, in hclge_get_phy_link_ksettings()
3335 cmd->base.autoneg = req0->autoneg; in hclge_get_phy_link_ksettings()
3336 cmd->base.speed = le32_to_cpu(req0->speed); in hclge_get_phy_link_ksettings()
3337 cmd->base.duplex = req0->duplex; in hclge_get_phy_link_ksettings()
3338 cmd->base.port = req0->port; in hclge_get_phy_link_ksettings()
3339 cmd->base.transceiver = req0->transceiver; in hclge_get_phy_link_ksettings()
3340 cmd->base.phy_address = req0->phy_address; in hclge_get_phy_link_ksettings()
3341 cmd->base.eth_tp_mdix = req0->eth_tp_mdix; in hclge_get_phy_link_ksettings()
3342 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; in hclge_get_phy_link_ksettings()
3343 supported = le32_to_cpu(req0->supported); in hclge_get_phy_link_ksettings()
3344 advertising = le32_to_cpu(req0->advertising); in hclge_get_phy_link_ksettings()
3345 lp_advertising = le32_to_cpu(req0->lp_advertising); in hclge_get_phy_link_ksettings()
3346 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in hclge_get_phy_link_ksettings()
3348 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in hclge_get_phy_link_ksettings()
3350 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, in hclge_get_phy_link_ksettings()
3354 cmd->base.master_slave_cfg = req1->master_slave_cfg; in hclge_get_phy_link_ksettings()
3355 cmd->base.master_slave_state = req1->master_slave_state; in hclge_get_phy_link_ksettings()
3368 struct hclge_dev *hdev = vport->back; in hclge_set_phy_link_ksettings()
3372 if (cmd->base.autoneg == AUTONEG_DISABLE && in hclge_set_phy_link_ksettings()
3373 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || in hclge_set_phy_link_ksettings()
3374 (cmd->base.duplex != DUPLEX_HALF && in hclge_set_phy_link_ksettings()
3375 cmd->base.duplex != DUPLEX_FULL))) in hclge_set_phy_link_ksettings()
3376 return -EINVAL; in hclge_set_phy_link_ksettings()
3385 req0->autoneg = cmd->base.autoneg; in hclge_set_phy_link_ksettings()
3386 req0->speed = cpu_to_le32(cmd->base.speed); in hclge_set_phy_link_ksettings()
3387 req0->duplex = cmd->base.duplex; in hclge_set_phy_link_ksettings()
3389 cmd->link_modes.advertising); in hclge_set_phy_link_ksettings()
3390 req0->advertising = cpu_to_le32(advertising); in hclge_set_phy_link_ksettings()
3391 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; in hclge_set_phy_link_ksettings()
3394 req1->master_slave_cfg = cmd->base.master_slave_cfg; in hclge_set_phy_link_ksettings()
3396 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_set_phy_link_ksettings()
3398 dev_err(&hdev->pdev->dev, in hclge_set_phy_link_ksettings()
3403 hdev->hw.mac.req_autoneg = cmd->base.autoneg; in hclge_set_phy_link_ksettings()
3404 hdev->hw.mac.req_speed = cmd->base.speed; in hclge_set_phy_link_ksettings()
3405 hdev->hw.mac.req_duplex = cmd->base.duplex; in hclge_set_phy_link_ksettings()
3406 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); in hclge_set_phy_link_ksettings()
3419 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_update_tp_port_info()
3423 hdev->hw.mac.autoneg = cmd.base.autoneg; in hclge_update_tp_port_info()
3424 hdev->hw.mac.speed = cmd.base.speed; in hclge_update_tp_port_info()
3425 hdev->hw.mac.duplex = cmd.base.duplex; in hclge_update_tp_port_info()
3426 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); in hclge_update_tp_port_info()
3438 cmd.base.autoneg = hdev->hw.mac.req_autoneg; in hclge_tp_port_init()
3439 cmd.base.speed = hdev->hw.mac.req_speed; in hclge_tp_port_init()
3440 cmd.base.duplex = hdev->hw.mac.req_duplex; in hclge_tp_port_init()
3441 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); in hclge_tp_port_init()
3443 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_tp_port_init()
3448 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_port_info()
3453 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) in hclge_update_port_info()
3457 if (!hdev->support_sfp_query) in hclge_update_port_info()
3460 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3461 speed = mac->speed; in hclge_update_port_info()
3468 if (ret == -EOPNOTSUPP) { in hclge_update_port_info()
3469 hdev->support_sfp_query = false; in hclge_update_port_info()
3475 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3476 if (mac->speed_type == QUERY_ACTIVE_SPEED) { in hclge_update_port_info()
3478 if (mac->speed != speed) in hclge_update_port_info()
3482 return hclge_cfg_mac_speed_dup(hdev, mac->speed, in hclge_update_port_info()
3483 HCLGE_MAC_FULL, mac->lane_num); in hclge_update_port_info()
3496 struct hclge_dev *hdev = vport->back; in hclge_get_status()
3500 return hdev->hw.mac.link; in hclge_get_status()
3505 if (!pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3506 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3511 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3512 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3514 vf, pci_num_vf(hdev->pdev)); in hclge_get_vf_vport()
3520 return &hdev->vport[vf]; in hclge_get_vf_vport()
3527 struct hclge_dev *hdev = vport->back; in hclge_get_vf_config()
3531 return -EINVAL; in hclge_get_vf_config()
3533 ivf->vf = vf; in hclge_get_vf_config()
3534 ivf->linkstate = vport->vf_info.link_state; in hclge_get_vf_config()
3535 ivf->spoofchk = vport->vf_info.spoofchk; in hclge_get_vf_config()
3536 ivf->trusted = vport->vf_info.trusted; in hclge_get_vf_config()
3537 ivf->min_tx_rate = 0; in hclge_get_vf_config()
3538 ivf->max_tx_rate = vport->vf_info.max_tx_rate; in hclge_get_vf_config()
3539 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; in hclge_get_vf_config()
3540 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); in hclge_get_vf_config()
3541 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; in hclge_get_vf_config()
3542 ether_addr_copy(ivf->mac, vport->vf_info.mac); in hclge_get_vf_config()
3551 struct hclge_dev *hdev = vport->back; in hclge_set_vf_link_state()
3557 return -EINVAL; in hclge_set_vf_link_state()
3559 link_state_old = vport->vf_info.link_state; in hclge_set_vf_link_state()
3560 vport->vf_info.link_state = link_state; in hclge_set_vf_link_state()
3565 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_set_vf_link_state()
3570 vport->vf_info.link_state = link_state_old; in hclge_set_vf_link_state()
3571 dev_err(&hdev->pdev->dev, in hclge_set_vf_link_state()
3586 set_bit(reset_type, &hdev->reset_pending); in hclge_set_reset_pending()
3594 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); in hclge_check_event_cause()
3595 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_check_event_cause()
3596 hw_err_src_reg = hclge_read_dev(&hdev->hw, in hclge_check_event_cause()
3608 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); in hclge_check_event_cause()
3610 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3612 hdev->rst_stats.imp_rst_cnt++; in hclge_check_event_cause()
3617 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); in hclge_check_event_cause()
3618 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3621 hdev->rst_stats.global_rst_cnt++; in hclge_check_event_cause()
3644 dev_info(&hdev->pdev->dev, in hclge_check_event_cause()
3662 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); in hclge_clear_event_cause()
3665 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); in hclge_clear_event_cause()
3683 writel(enable ? 1 : 0, vector->addr); in hclge_enable_vector()
3693 hclge_enable_vector(&hdev->misc_vector, false); in hclge_misc_irq_handle()
3705 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3707 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3722 dev_warn(&hdev->pdev->dev, in hclge_misc_irq_handle()
3733 hclge_enable_vector(&hdev->misc_vector, true); in hclge_misc_irq_handle()
3740 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { in hclge_free_vector()
3741 dev_warn(&hdev->pdev->dev, in hclge_free_vector()
3746 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; in hclge_free_vector()
3747 hdev->num_msi_left += 1; in hclge_free_vector()
3748 hdev->num_msi_used -= 1; in hclge_free_vector()
3753 struct hclge_misc_vector *vector = &hdev->misc_vector; in hclge_get_misc_vector()
3755 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); in hclge_get_misc_vector()
3757 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; in hclge_get_misc_vector()
3758 hdev->vector_status[0] = 0; in hclge_get_misc_vector()
3760 hdev->num_msi_left -= 1; in hclge_get_misc_vector()
3761 hdev->num_msi_used += 1; in hclge_get_misc_vector()
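/* note: vector 0 is claimed here for the misc interrupt, so the MSI
 * accounting above reserves it before any NIC/RoCE vectors are handed out.
 */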
3771 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", in hclge_misc_irq_init()
3772 HCLGE_NAME, pci_name(hdev->pdev)); in hclge_misc_irq_init()
3773 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, in hclge_misc_irq_init()
3774 IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev); in hclge_misc_irq_init()
3777 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", in hclge_misc_irq_init()
3778 hdev->misc_vector.vector_irq); in hclge_misc_irq_init()
3786 free_irq(hdev->misc_vector.vector_irq, hdev); in hclge_misc_irq_uninit()
3793 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_notify_client()
3794 struct hnae3_client *client = hdev->nic_client; in hclge_notify_client()
3797 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) in hclge_notify_client()
3800 if (!client->ops->reset_notify) in hclge_notify_client()
3801 return -EOPNOTSUPP; in hclge_notify_client()
3803 ret = client->ops->reset_notify(handle, type); in hclge_notify_client()
3805 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", in hclge_notify_client()
3814 struct hnae3_handle *handle = &hdev->vport[0].roce; in hclge_notify_roce_client()
3815 struct hnae3_client *client = hdev->roce_client; in hclge_notify_roce_client()
3818 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclge_notify_roce_client()
3821 if (!client->ops->reset_notify) in hclge_notify_roce_client()
3822 return -EOPNOTSUPP; in hclge_notify_roce_client()
3824 ret = client->ops->reset_notify(handle, type); in hclge_notify_roce_client()
3826 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", in hclge_notify_roce_client()
3840 switch (hdev->reset_type) { in hclge_reset_wait()
3854 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3856 hdev->reset_type); in hclge_reset_wait()
3857 return -EINVAL; in hclge_reset_wait()
3860 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3863 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3868 dev_warn(&hdev->pdev->dev, in hclge_reset_wait()
3869 "Wait for reset timeout: %d\n", hdev->reset_type); in hclge_reset_wait()
3870 return -EBUSY; in hclge_reset_wait()
3883 req->dest_vfid = func_id; in hclge_set_vf_rst()
3886 req->vf_rst = 0x1; in hclge_set_vf_rst()
3888 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vf_rst()
3895 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { in hclge_set_all_vf_rst()
3896 struct hclge_vport *vport = &hdev->vport[i]; in hclge_set_all_vf_rst()
3900 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); in hclge_set_all_vf_rst()
3902 dev_err(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3904 vport->vport_id - HCLGE_VF_VPORT_START_NUM, in hclge_set_all_vf_rst()
3910 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) in hclge_set_all_vf_rst()
3913 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && in hclge_set_all_vf_rst()
3914 hdev->reset_type == HNAE3_FUNC_RESET) { in hclge_set_all_vf_rst()
3916 &vport->need_notify); in hclge_set_all_vf_rst()
3926 dev_warn(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3928 vport->vport_id - HCLGE_VF_VPORT_START_NUM, in hclge_set_all_vf_rst()
3937 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || in hclge_mailbox_service_task()
3938 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || in hclge_mailbox_service_task()
3939 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) in hclge_mailbox_service_task()
3942 if (time_is_before_jiffies(hdev->last_mbx_scheduled + in hclge_mailbox_service_task()
3944 dev_warn(&hdev->pdev->dev, in hclge_mailbox_service_task()
3946 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), in hclge_mailbox_service_task()
3951 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_mailbox_service_task()
3968 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_sync_vf()
3972 if (ret == -EOPNOTSUPP) { in hclge_func_reset_sync_vf()
3976 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", in hclge_func_reset_sync_vf()
3979 } else if (req->all_vf_ready) { in hclge_func_reset_sync_vf()
3986 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); in hclge_func_reset_sync_vf()
3992 struct hnae3_client *client = hdev->nic_client; in hclge_report_hw_error()
3994 if (!client || !client->ops->process_hw_error || in hclge_report_hw_error()
3995 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) in hclge_report_hw_error()
3998 client->ops->process_hw_error(&hdev->vport[0].nic, type); in hclge_report_hw_error()
4005 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_handle_imp_error()
4009 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4015 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4026 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); in hclge_func_reset_cmd()
4027 req->fun_reset_vfid = func_id; in hclge_func_reset_cmd()
4029 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_cmd()
4031 dev_err(&hdev->pdev->dev, in hclge_func_reset_cmd()
4039 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_do_reset()
4040 struct pci_dev *pdev = hdev->pdev; in hclge_do_reset()
4044 dev_info(&pdev->dev, "hardware reset not finish\n"); in hclge_do_reset()
4045 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", in hclge_do_reset()
4046 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), in hclge_do_reset()
4047 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); in hclge_do_reset()
4051 switch (hdev->reset_type) { in hclge_do_reset()
4053 dev_info(&pdev->dev, "IMP reset requested\n"); in hclge_do_reset()
4054 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_do_reset()
4056 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); in hclge_do_reset()
4059 dev_info(&pdev->dev, "global reset requested\n"); in hclge_do_reset()
4060 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); in hclge_do_reset()
4062 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); in hclge_do_reset()
4065 dev_info(&pdev->dev, "PF reset requested\n"); in hclge_do_reset()
4071 dev_warn(&pdev->dev, in hclge_do_reset()
4072 "unsupported reset type: %d\n", hdev->reset_type); in hclge_do_reset()
4081 struct hclge_dev *hdev = ae_dev->priv; in hclge_get_reset_level()
4103 if (hdev->reset_type != HNAE3_NONE_RESET && in hclge_get_reset_level()
4104 rst_level < hdev->reset_type) in hclge_get_reset_level()
4114 switch (hdev->reset_type) { in hclge_clear_reset_cause()
4131 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_clear_reset_cause()
4132 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, in hclge_clear_reset_cause()
4135 hclge_enable_vector(&hdev->misc_vector, true); in hclge_clear_reset_cause()
4142 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); in hclge_reset_handshake()
4148 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); in hclge_reset_handshake()
4169 switch (hdev->reset_type) { in hclge_reset_prepare_wait()
4177 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_wait()
4187 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_wait()
4188 hdev->rst_stats.pf_rst_cnt++; in hclge_reset_prepare_wait()
4197 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_reset_prepare_wait()
4198 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, in hclge_reset_prepare_wait()
4208 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); in hclge_reset_prepare_wait()
4223 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); in hclge_show_rst_info()
4232 if (hdev->reset_pending) { in hclge_reset_err_handle()
4233 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", in hclge_reset_err_handle()
4234 hdev->reset_pending); in hclge_reset_err_handle()
4236 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & in hclge_reset_err_handle()
4238 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4242 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { in hclge_reset_err_handle()
4243 hdev->rst_stats.reset_fail_cnt++; in hclge_reset_err_handle()
4244 hclge_set_reset_pending(hdev, hdev->reset_type); in hclge_reset_err_handle()
4245 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4246 "re-schedule reset task(%u)\n", in hclge_reset_err_handle()
4247 hdev->rst_stats.reset_fail_cnt); in hclge_reset_err_handle()
4256 dev_err(&hdev->pdev->dev, "Reset fail!\n"); in hclge_reset_err_handle()
4260 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_err_handle()
4267 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_reset_level()
4274 hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_update_reset_level()
4281 &hdev->default_reset_request); in hclge_update_reset_level()
4283 set_bit(reset_level, &hdev->reset_request); in hclge_update_reset_level()
4294 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; in hclge_set_rst_done()
4296 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rst_done()
4301 if (ret == -EOPNOTSUPP) { in hclge_set_rst_done()
4302 dev_warn(&hdev->pdev->dev, in hclge_set_rst_done()
4307 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", in hclge_set_rst_done()
4318 switch (hdev->reset_type) { in hclge_reset_prepare_up()
4331 /* clear the handshake status after re-initialization is done */ in hclge_reset_prepare_up()
4345 ret = hclge_reset_ae_dev(hdev->ae_dev); in hclge_reset_stack()
4356 hdev->rst_stats.reset_cnt++; in hclge_reset_prepare()
4375 hdev->rst_stats.hw_reset_done_cnt++; in hclge_reset_rebuild()
4390 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 in hclge_reset_rebuild()
4394 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) in hclge_reset_rebuild()
4411 hdev->last_reset_time = jiffies; in hclge_reset_rebuild()
4412 hdev->rst_stats.reset_fail_cnt = 0; in hclge_reset_rebuild()
4413 hdev->rst_stats.reset_done_cnt++; in hclge_reset_rebuild()
4414 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_rebuild()
4442 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_event()
4456 * not allow it again before 3*HZ times. in hclge_reset_event()
4459 if (time_before(jiffies, (hdev->last_reset_time + in hclge_reset_event()
4461 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_reset_event()
4465 if (hdev->default_reset_request) { in hclge_reset_event()
4466 hdev->reset_level = in hclge_reset_event()
4468 &hdev->default_reset_request); in hclge_reset_event()
4469 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { in hclge_reset_event()
4470 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_reset_event()
4473 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", in hclge_reset_event()
4474 hdev->reset_level); in hclge_reset_event()
4477 set_bit(hdev->reset_level, &hdev->reset_request); in hclge_reset_event()
4480 if (hdev->reset_level < HNAE3_GLOBAL_RESET) in hclge_reset_event()
4481 hdev->reset_level++; in hclge_reset_event()
4491 struct hclge_dev *hdev = ae_dev->priv; in hclge_set_def_reset_request()
4495 set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request); in hclge_set_def_reset_request()
4496 dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n", in hclge_set_def_reset_request()
4501 set_bit(rst_type, &hdev->default_reset_request); in hclge_set_def_reset_request()
4511 if (!hdev->default_reset_request) in hclge_reset_timer()
4514 dev_info(&hdev->pdev->dev, in hclge_reset_timer()
4516 hclge_reset_event(hdev->pdev, NULL); in hclge_reset_timer()
4521 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_subtask()
4529 * b. else, we can come back later to check this status so re-sched in hclge_reset_subtask()
4532 hdev->last_reset_time = jiffies; in hclge_reset_subtask()
4533 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); in hclge_reset_subtask()
4534 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4538 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_reset_subtask()
4539 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4542 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_subtask()
4547 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_reset_request()
4550 if (ae_dev->hw_err_reset_req) { in hclge_handle_err_reset_request()
4552 &ae_dev->hw_err_reset_req); in hclge_handle_err_reset_request()
4556 if (hdev->default_reset_request && ae_dev->ops->reset_event) in hclge_handle_err_reset_request()
4557 ae_dev->ops->reset_event(hdev->pdev, NULL); in hclge_handle_err_reset_request()
4560 hclge_enable_vector(&hdev->misc_vector, true); in hclge_handle_err_reset_request()
4565 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_recovery()
4567 ae_dev->hw_err_reset_req = 0; in hclge_handle_err_recovery()
4580 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_misc_err_recovery()
4581 struct device *dev = &hdev->pdev->dev; in hclge_misc_err_recovery()
4584 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_misc_err_recovery()
4587 (hdev, &hdev->default_reset_request)) in hclge_misc_err_recovery()
4599 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_service_task()
4610 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_service_task()
4613 if (time_is_before_jiffies(hdev->last_rst_scheduled + in hclge_reset_service_task()
4615 dev_warn(&hdev->pdev->dev, in hclge_reset_service_task()
4617 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), in hclge_reset_service_task()
4620 down(&hdev->reset_sem); in hclge_reset_service_task()
4621 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4625 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4626 up(&hdev->reset_sem); in hclge_reset_service_task()
4637 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_update_vport_alive()
4638 struct hclge_vport *vport = &hdev->vport[i]; in hclge_update_vport_alive()
4640 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || in hclge_update_vport_alive()
4641 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_update_vport_alive()
4643 if (time_after(jiffies, vport->last_active_jiffies + in hclge_update_vport_alive()
4645 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_update_vport_alive()
4646 dev_warn(&hdev->pdev->dev, in hclge_update_vport_alive()
4648 i - HCLGE_VF_VPORT_START_NUM); in hclge_update_vport_alive()
4657 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_periodic_service_task()
4668 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { in hclge_periodic_service_task()
4669 delta = jiffies - hdev->last_serv_processed; in hclge_periodic_service_task()
4672 delta = round_jiffies_relative(HZ) - delta; in hclge_periodic_service_task()
4677 hdev->serv_processed_cnt++; in hclge_periodic_service_task()
4680 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { in hclge_periodic_service_task()
4681 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4685 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) in hclge_periodic_service_task()
4691 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) in hclge_periodic_service_task()
4694 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4704 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || in hclge_ptp_service_task()
4705 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || in hclge_ptp_service_task()
4706 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) in hclge_ptp_service_task()
4710 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4715 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) in hclge_ptp_service_task()
4718 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4744 if (!handle->client) in hclge_get_vport()
4746 else if (handle->client->type == HNAE3_CLIENT_ROCE) in hclge_get_vport()
4757 vector_info->vector = pci_irq_vector(hdev->pdev, idx); in hclge_get_vector_info()
4760 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) in hclge_get_vector_info()
4761 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4763 (idx - 1) * HCLGE_VECTOR_REG_OFFSET; in hclge_get_vector_info()
4765 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4767 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * in hclge_get_vector_info()
4769 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * in hclge_get_vector_info()
4772 hdev->vector_status[idx] = hdev->vport[0].vport_id; in hclge_get_vector_info()
4773 hdev->vector_irq[idx] = vector_info->vector; in hclge_get_vector_info()
4781 struct hclge_dev *hdev = vport->back; in hclge_get_vector()
4786 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); in hclge_get_vector()
4787 vector_num = min(hdev->num_msi_left, vector_num); in hclge_get_vector()
4790 while (++i < hdev->num_nic_msi) { in hclge_get_vector()
4791 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { in hclge_get_vector()
4800 hdev->num_msi_left -= alloc; in hclge_get_vector()
4801 hdev->num_msi_used += alloc; in hclge_get_vector()
4810 for (i = 0; i < hdev->num_msi; i++) in hclge_get_vector_index()
4811 if (vector == hdev->vector_irq[i]) in hclge_get_vector_index()
4814 return -EINVAL; in hclge_get_vector_index()
4820 struct hclge_dev *hdev = vport->back; in hclge_put_vector()
4825 dev_err(&hdev->pdev->dev, in hclge_put_vector()
4838 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_get_rss()
4840 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; in hclge_get_rss()
4845 ae_dev->dev_specs.rss_ind_tbl_size); in hclge_get_rss()
4853 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_set_rss()
4855 struct hclge_dev *hdev = vport->back; in hclge_set_rss()
4856 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclge_set_rss()
4859 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); in hclge_set_rss()
4861 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); in hclge_set_rss()
4866 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) in hclge_set_rss()
4867 rss_cfg->rss_indirection_tbl[i] = indir[i]; in hclge_set_rss()
4870 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, in hclge_set_rss()
4871 rss_cfg->rss_indirection_tbl); in hclge_set_rss()
4878 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tuple()
4881 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, in hclge_set_rss_tuple()
4882 &hdev->rss_cfg, nfc); in hclge_set_rss_tuple()
4884 dev_err(&hdev->pdev->dev, in hclge_set_rss_tuple()
4899 nfc->data = 0; in hclge_get_rss_tuple()
4901 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, in hclge_get_rss_tuple()
4906 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); in hclge_get_rss_tuple()
4914 struct hclge_dev *hdev = vport->back; in hclge_get_tc_size()
4916 return hdev->pf_rss_size_max; in hclge_get_tc_size()
4921 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_init_rss_tc_mode()
4922 struct hclge_vport *vport = hdev->vport; in hclge_init_rss_tc_mode()
4931 tc_info = &vport->nic.kinfo.tc_info; in hclge_init_rss_tc_mode()
4933 rss_size = tc_info->tqp_count[i]; in hclge_init_rss_tc_mode()
4936 if (!(hdev->hw_tc_map & BIT(i))) in hclge_init_rss_tc_mode()
4943 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || in hclge_init_rss_tc_mode()
4945 dev_err(&hdev->pdev->dev, in hclge_init_rss_tc_mode()
4948 return -EINVAL; in hclge_init_rss_tc_mode()
4956 tc_offset[i] = tc_info->tqp_offset[i]; in hclge_init_rss_tc_mode()
4959 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_init_rss_tc_mode()
4965 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; in hclge_rss_init_hw()
4966 u8 *key = hdev->rss_cfg.rss_hash_key; in hclge_rss_init_hw()
4967 u8 hfunc = hdev->rss_cfg.rss_algo; in hclge_rss_init_hw()
4970 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, in hclge_rss_init_hw()
4975 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); in hclge_rss_init_hw()
4979 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); in hclge_rss_init_hw()
4990 struct hclge_dev *hdev = vport->back; in hclge_bind_ring_with_vector()
5002 req->int_vector_id_l = hnae3_get_field(vector_id, in hclge_bind_ring_with_vector()
5005 req->int_vector_id_h = hnae3_get_field(vector_id, in hclge_bind_ring_with_vector()
5010 for (node = ring_chain; node; node = node->next) { in hclge_bind_ring_with_vector()
5011 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); in hclge_bind_ring_with_vector()
5014 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); in hclge_bind_ring_with_vector()
5016 HCLGE_TQP_ID_S, node->tqp_index); in hclge_bind_ring_with_vector()
5019 hnae3_get_field(node->int_gl_idx, in hclge_bind_ring_with_vector()
5022 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); in hclge_bind_ring_with_vector()
5024 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; in hclge_bind_ring_with_vector()
5025 req->vfid = vport->vport_id; in hclge_bind_ring_with_vector()
5027 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5029 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5032 return -EIO; in hclge_bind_ring_with_vector()
5039 req->int_vector_id_l = in hclge_bind_ring_with_vector()
5043 req->int_vector_id_h = in hclge_bind_ring_with_vector()
5051 req->int_cause_num = i; in hclge_bind_ring_with_vector()
5052 req->vfid = vport->vport_id; in hclge_bind_ring_with_vector()
5053 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5055 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5057 return -EIO; in hclge_bind_ring_with_vector()
5068 struct hclge_dev *hdev = vport->back; in hclge_map_ring_to_vector()
5073 dev_err(&hdev->pdev->dev, in hclge_map_ring_to_vector()
5085 struct hclge_dev *hdev = vport->back; in hclge_unmap_ring_frm_vector()
5088 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_unmap_ring_frm_vector()
5093 dev_err(&handle->pdev->dev, in hclge_unmap_ring_frm_vector()
5100 dev_err(&handle->pdev->dev, in hclge_unmap_ring_frm_vector()
5110 struct hclge_vport *vport = &hdev->vport[vf_id]; in hclge_cmd_set_promisc_mode()
5111 struct hnae3_handle *handle = &vport->nic; in hclge_cmd_set_promisc_mode()
5121 req->vf_id = vf_id; in hclge_cmd_set_promisc_mode()
5123 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) in hclge_cmd_set_promisc_mode()
5132 req->extend_promisc = promisc_cfg; in hclge_cmd_set_promisc_mode()
5141 req->promisc = promisc_cfg; in hclge_cmd_set_promisc_mode()
5143 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_set_promisc_mode()
5145 dev_err(&hdev->pdev->dev, in hclge_cmd_set_promisc_mode()
5155 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, in hclge_set_vport_promisc_mode()
5163 struct hclge_dev *hdev = vport->back; in hclge_set_promisc_mode()
5170 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_promisc_mode()
5171 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; in hclge_set_promisc_mode()
5181 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_request_update_promisc_mode()
5186 if (hlist_empty(&hdev->fd_rule_list)) in hclge_sync_fd_state()
5187 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_sync_fd_state()
5192 if (!test_bit(location, hdev->fd_bmap)) { in hclge_fd_inc_rule_cnt()
5193 set_bit(location, hdev->fd_bmap); in hclge_fd_inc_rule_cnt()
5194 hdev->hclge_fd_rule_num++; in hclge_fd_inc_rule_cnt()
5200 if (test_bit(location, hdev->fd_bmap)) { in hclge_fd_dec_rule_cnt()
5201 clear_bit(location, hdev->fd_bmap); in hclge_fd_dec_rule_cnt()
5202 hdev->hclge_fd_rule_num--; in hclge_fd_dec_rule_cnt()
5209 hlist_del(&rule->rule_node); in hclge_fd_free_node()
5228 * 3) for it doesn't add a new node to the list, so it's in hclge_update_fd_rule_node()
5231 new_rule->rule_node.next = old_rule->rule_node.next; in hclge_update_fd_rule_node()
5232 new_rule->rule_node.pprev = old_rule->rule_node.pprev; in hclge_update_fd_rule_node()
5237 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5248 * 3) the state of old rule is TO_ADD, it means the rule hasn't in hclge_update_fd_rule_node()
5252 if (old_rule->state == HCLGE_FD_TO_ADD) { in hclge_update_fd_rule_node()
5253 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5257 old_rule->state = HCLGE_FD_TO_DEL; in hclge_update_fd_rule_node()
5270 if (rule->location == location) in hclge_find_fd_rule()
5272 else if (rule->location > location) in hclge_find_fd_rule()
5283 /* insert fd rule node in ascending order according to rule->location */
5288 INIT_HLIST_NODE(&rule->rule_node); in hclge_fd_insert_rule_node()
5291 hlist_add_behind(&rule->rule_node, &parent->rule_node); in hclge_fd_insert_rule_node()
5293 hlist_add_head(&rule->rule_node, hlist); in hclge_fd_insert_rule_node()
5311 req->ol2_cfg = cpu_to_le16(data); in hclge_fd_set_user_def_cmd()
5317 req->ol3_cfg = cpu_to_le16(data); in hclge_fd_set_user_def_cmd()
5323 req->ol4_cfg = cpu_to_le16(data); in hclge_fd_set_user_def_cmd()
5325 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_set_user_def_cmd()
5327 dev_err(&hdev->pdev->dev, in hclge_fd_set_user_def_cmd()
5336 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) in hclge_sync_fd_user_def_cfg()
5340 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5342 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); in hclge_sync_fd_user_def_cfg()
5344 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_sync_fd_user_def_cfg()
5347 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5353 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_fd_check_user_def_refcnt()
5358 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || in hclge_fd_check_user_def_refcnt()
5359 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) in hclge_fd_check_user_def_refcnt()
5363 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_check_user_def_refcnt()
5364 info = &rule->ep.user_def; in hclge_fd_check_user_def_refcnt()
5366 if (!cfg->ref_cnt || cfg->offset == info->offset) in hclge_fd_check_user_def_refcnt()
5369 if (cfg->ref_cnt > 1) in hclge_fd_check_user_def_refcnt()
5372 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); in hclge_fd_check_user_def_refcnt()
5374 old_info = &fd_rule->ep.user_def; in hclge_fd_check_user_def_refcnt()
5375 if (info->layer == old_info->layer) in hclge_fd_check_user_def_refcnt()
5380 dev_err(&hdev->pdev->dev, in hclge_fd_check_user_def_refcnt()
5382 info->layer + 1); in hclge_fd_check_user_def_refcnt()
5383 return -ENOSPC; in hclge_fd_check_user_def_refcnt()
5391 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || in hclge_fd_inc_user_def_refcnt()
5392 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) in hclge_fd_inc_user_def_refcnt()
5395 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_inc_user_def_refcnt()
5396 if (!cfg->ref_cnt) { in hclge_fd_inc_user_def_refcnt()
5397 cfg->offset = rule->ep.user_def.offset; in hclge_fd_inc_user_def_refcnt()
5398 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_inc_user_def_refcnt()
5400 cfg->ref_cnt++; in hclge_fd_inc_user_def_refcnt()
5408 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || in hclge_fd_dec_user_def_refcnt()
5409 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) in hclge_fd_dec_user_def_refcnt()
5412 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_dec_user_def_refcnt()
5413 if (!cfg->ref_cnt) in hclge_fd_dec_user_def_refcnt()
5416 cfg->ref_cnt--; in hclge_fd_dec_user_def_refcnt()
5417 if (!cfg->ref_cnt) { in hclge_fd_dec_user_def_refcnt()
5418 cfg->offset = 0; in hclge_fd_dec_user_def_refcnt()
5419 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_dec_user_def_refcnt()
5427 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_update_fd_list()
5445 dev_warn(&hdev->pdev->dev, in hclge_update_fd_list()
5455 hclge_fd_inc_rule_cnt(hdev, new_rule->location); in hclge_update_fd_list()
5458 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_update_fd_list()
5473 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_mode()
5475 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); in hclge_get_fd_mode()
5479 *fd_mode = req->mode; in hclge_get_fd_mode()
5498 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_allocation()
5500 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", in hclge_get_fd_allocation()
5505 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); in hclge_get_fd_allocation()
5506 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); in hclge_get_fd_allocation()
5507 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); in hclge_get_fd_allocation()
5508 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); in hclge_get_fd_allocation()
5524 stage = &hdev->fd_cfg.key_cfg[stage_num]; in hclge_set_fd_key_config()
5525 req->stage = stage_num; in hclge_set_fd_key_config()
5526 req->key_select = stage->key_sel; in hclge_set_fd_key_config()
5527 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; in hclge_set_fd_key_config()
5528 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; in hclge_set_fd_key_config()
5529 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; in hclge_set_fd_key_config()
5530 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; in hclge_set_fd_key_config()
5531 req->tuple_mask = cpu_to_le32(~stage->tuple_active); in hclge_set_fd_key_config()
5532 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); in hclge_set_fd_key_config()
5534 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fd_key_config()
5536 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); in hclge_set_fd_key_config()
5543 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; in hclge_fd_disable_user_def()
5545 spin_lock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5546 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); in hclge_fd_disable_user_def()
5547 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5558 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_init_fd_config()
5561 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5565 switch (hdev->fd_cfg.fd_mode) { in hclge_init_fd_config()
5567 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; in hclge_init_fd_config()
5570 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; in hclge_init_fd_config()
5573 dev_err(&hdev->pdev->dev, in hclge_init_fd_config()
5575 hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5576 return -EOPNOTSUPP; in hclge_init_fd_config()
5579 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; in hclge_init_fd_config()
5580 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; in hclge_init_fd_config()
5581 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; in hclge_init_fd_config()
5582 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; in hclge_init_fd_config()
5583 key_cfg->outer_sipv6_word_en = 0; in hclge_init_fd_config()
5584 key_cfg->outer_dipv6_word_en = 0; in hclge_init_fd_config()
5586 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | in hclge_init_fd_config()
5591 /* If using the max 400-bit key, we can support tuples for ether type */ in hclge_init_fd_config()
5592 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { in hclge_init_fd_config()
5593 key_cfg->tuple_active |= in hclge_init_fd_config()
5595 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hclge_init_fd_config()
5596 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; in hclge_init_fd_config()
5602 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); in hclge_init_fd_config()
5605 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5606 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], in hclge_init_fd_config()
5607 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5608 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); in hclge_init_fd_config()
5621 struct hclge_desc desc[3]; in hclge_fd_tcam_config()
5634 req1->stage = stage; in hclge_fd_tcam_config()
5635 req1->xy_sel = sel_x ? 1 : 0; in hclge_fd_tcam_config()
5636 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); in hclge_fd_tcam_config()
5637 req1->index = cpu_to_le32(loc); in hclge_fd_tcam_config()
5638 req1->entry_vld = sel_x ? is_add : 0; in hclge_fd_tcam_config()
5641 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); in hclge_fd_tcam_config()
5642 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], in hclge_fd_tcam_config()
5643 sizeof(req2->tcam_data)); in hclge_fd_tcam_config()
5644 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + in hclge_fd_tcam_config()
5645 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); in hclge_fd_tcam_config()
5648 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_fd_tcam_config()
5650 dev_err(&hdev->pdev->dev, in hclge_fd_tcam_config()
5660 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_fd_ad_config()
5669 req->index = cpu_to_le32(loc); in hclge_fd_ad_config()
5670 req->stage = stage; in hclge_fd_ad_config()
5673 action->write_rule_id_to_bd); in hclge_fd_ad_config()
5675 action->rule_id); in hclge_fd_ad_config()
5676 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { in hclge_fd_ad_config()
5678 action->override_tc); in hclge_fd_ad_config()
5680 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); in hclge_fd_ad_config()
5683 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); in hclge_fd_ad_config()
5685 action->forward_to_direct_queue); in hclge_fd_ad_config()
5687 action->queue_id); in hclge_fd_ad_config()
5688 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); in hclge_fd_ad_config()
5690 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); in hclge_fd_ad_config()
5691 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); in hclge_fd_ad_config()
5693 action->counter_id); in hclge_fd_ad_config()
5695 req->ad_data = cpu_to_le64(ad_data); in hclge_fd_ad_config()
5696 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_ad_config()
5698 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); in hclge_fd_ad_config()
5713 if (rule->unused_tuple & BIT(tuple_bit)) in hclge_fd_convert_tuple()
5742 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], in hclge_fd_convert_tuple()
5744 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], in hclge_fd_convert_tuple()
5794 tuple_bit = key_cfg->meta_data_active & BIT(i); in hclge_fd_convert_meta_data()
5803 rule->vf_id, 0); in hclge_fd_convert_meta_data()
5816 shift_bits = sizeof(meta_data) * 8 - cur_pos; in hclge_fd_convert_meta_data()
5829 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; in hclge_config_key()
5846 if (!(key_cfg->tuple_active & BIT(i))) in hclge_config_key()
5857 meta_data_region = hdev->fd_cfg.max_key_length / 8 - in hclge_config_key()
5865 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, in hclge_config_key()
5868 dev_err(&hdev->pdev->dev, in hclge_config_key()
5870 rule->queue_id, ret); in hclge_config_key()
5874 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, in hclge_config_key()
5877 dev_err(&hdev->pdev->dev, in hclge_config_key()
5879 rule->queue_id, ret); in hclge_config_key()
5886 struct hclge_vport *vport = hdev->vport; in hclge_config_action()
5887 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_config_action()
5891 ad_data.ad_id = rule->location; in hclge_config_action()
5893 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { in hclge_config_action()
5895 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { in hclge_config_action()
5898 kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; in hclge_config_action()
5900 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); in hclge_config_action()
5903 ad_data.queue_id = rule->queue_id; in hclge_config_action()
5906 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { in hclge_config_action()
5908 ad_data.counter_id = rule->vf_id % in hclge_config_action()
5909 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; in hclge_config_action()
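/* note: when stage-1 counters are available, each rule reuses a per-VF
 * counter selected as vf_id modulo the stage-1 counter pool size.
 */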
5919 ad_data.rule_id = rule->location; in hclge_config_action()
5928 return -EINVAL; in hclge_fd_check_tcpip4_tuple()
5932 if (!spec->ip4src) in hclge_fd_check_tcpip4_tuple()
5935 if (!spec->ip4dst) in hclge_fd_check_tcpip4_tuple()
5938 if (!spec->psrc) in hclge_fd_check_tcpip4_tuple()
5941 if (!spec->pdst) in hclge_fd_check_tcpip4_tuple()
5944 if (!spec->tos) in hclge_fd_check_tcpip4_tuple()
5954 return -EINVAL; in hclge_fd_check_ip4_tuple()
5959 if (!spec->ip4src) in hclge_fd_check_ip4_tuple()
5962 if (!spec->ip4dst) in hclge_fd_check_ip4_tuple()
5965 if (!spec->tos) in hclge_fd_check_ip4_tuple()
5968 if (!spec->proto) in hclge_fd_check_ip4_tuple()
5971 if (spec->l4_4_bytes) in hclge_fd_check_ip4_tuple()
5972 return -EOPNOTSUPP; in hclge_fd_check_ip4_tuple()
5974 if (spec->ip_ver != ETH_RX_NFC_IP4) in hclge_fd_check_ip4_tuple()
5975 return -EOPNOTSUPP; in hclge_fd_check_ip4_tuple()
5984 return -EINVAL; in hclge_fd_check_tcpip6_tuple()
5989 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) in hclge_fd_check_tcpip6_tuple()
5992 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) in hclge_fd_check_tcpip6_tuple()
5995 if (!spec->psrc) in hclge_fd_check_tcpip6_tuple()
5998 if (!spec->pdst) in hclge_fd_check_tcpip6_tuple()
6001 if (!spec->tclass) in hclge_fd_check_tcpip6_tuple()
6011 return -EINVAL; in hclge_fd_check_ip6_tuple()
6017 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) in hclge_fd_check_ip6_tuple()
6020 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) in hclge_fd_check_ip6_tuple()
6023 if (!spec->l4_proto) in hclge_fd_check_ip6_tuple()
6026 if (!spec->tclass) in hclge_fd_check_ip6_tuple()
6029 if (spec->l4_4_bytes) in hclge_fd_check_ip6_tuple()
6030 return -EOPNOTSUPP; in hclge_fd_check_ip6_tuple()
6038 return -EINVAL; in hclge_fd_check_ether_tuple()
6044 if (is_zero_ether_addr(spec->h_source)) in hclge_fd_check_ether_tuple()
6047 if (is_zero_ether_addr(spec->h_dest)) in hclge_fd_check_ether_tuple()
6050 if (!spec->h_proto) in hclge_fd_check_ether_tuple()
6060 if (fs->flow_type & FLOW_EXT) { in hclge_fd_check_ext_tuple()
6061 if (fs->h_ext.vlan_etype) { in hclge_fd_check_ext_tuple()
6062 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); in hclge_fd_check_ext_tuple()
6063 return -EOPNOTSUPP; in hclge_fd_check_ext_tuple()
6066 if (!fs->h_ext.vlan_tci) in hclge_fd_check_ext_tuple()
6069 if (fs->m_ext.vlan_tci && in hclge_fd_check_ext_tuple()
6070 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { in hclge_fd_check_ext_tuple()
6071 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6073 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); in hclge_fd_check_ext_tuple()
6074 return -EINVAL; in hclge_fd_check_ext_tuple()
6080 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_check_ext_tuple()
6081 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_ext_tuple()
6083 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6085 return -EOPNOTSUPP; in hclge_fd_check_ext_tuple()
6088 if (is_zero_ether_addr(fs->h_ext.h_dest)) in hclge_fd_check_ext_tuple()
6102 info->layer = HCLGE_FD_USER_DEF_L2; in hclge_fd_get_user_def_layer()
6107 info->layer = HCLGE_FD_USER_DEF_L3; in hclge_fd_get_user_def_layer()
6114 info->layer = HCLGE_FD_USER_DEF_L4; in hclge_fd_get_user_def_layer()
6118 return -EOPNOTSUPP; in hclge_fd_get_user_def_layer()
6126 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; in hclge_fd_is_user_def_all_masked()
6134 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; in hclge_fd_parse_user_def_field()
6135 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_parse_user_def_field()
6139 info->layer = HCLGE_FD_USER_DEF_NONE; in hclge_fd_parse_user_def_field()
6142 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) in hclge_fd_parse_user_def_field()
6145 /* user-def data from ethtool is a 64-bit value; bits 0~15 are used in hclge_fd_parse_user_def_field()
6148 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; in hclge_fd_parse_user_def_field()
6149 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; in hclge_fd_parse_user_def_field()
6150 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; in hclge_fd_parse_user_def_field()
6151 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; in hclge_fd_parse_user_def_field()
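/* note: per the masks above, h_ext.data[1] holds the user-def match data
 * and h_ext.data[0] holds its offset; m_ext carries the corresponding masks.
 */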
6154 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); in hclge_fd_parse_user_def_field()
6155 return -EOPNOTSUPP; in hclge_fd_parse_user_def_field()
6159 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6160 "user-def offset[%u] should be no more than %u\n", in hclge_fd_parse_user_def_field()
6162 return -EINVAL; in hclge_fd_parse_user_def_field()
6166 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); in hclge_fd_parse_user_def_field()
6167 return -EINVAL; in hclge_fd_parse_user_def_field()
6172 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6173 "unsupported flow type for user-def bytes, ret = %d\n", in hclge_fd_parse_user_def_field()
6178 info->data = data; in hclge_fd_parse_user_def_field()
6179 info->data_mask = data_mask; in hclge_fd_parse_user_def_field()
6180 info->offset = offset; in hclge_fd_parse_user_def_field()
6193 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_fd_check_spec()
6194 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6196 fs->location, in hclge_fd_check_spec()
6197 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); in hclge_fd_check_spec()
6198 return -EINVAL; in hclge_fd_check_spec()
6205 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_check_spec()
6210 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, in hclge_fd_check_spec()
6214 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, in hclge_fd_check_spec()
6220 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, in hclge_fd_check_spec()
6224 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, in hclge_fd_check_spec()
6228 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_spec()
6230 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6232 return -EOPNOTSUPP; in hclge_fd_check_spec()
6235 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, in hclge_fd_check_spec()
6239 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6242 return -EOPNOTSUPP; in hclge_fd_check_spec()
6246 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6258 rule->tuples.src_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6259 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); in hclge_fd_get_tcpip4_tuple()
6260 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6261 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); in hclge_fd_get_tcpip4_tuple()
6263 rule->tuples.dst_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6264 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); in hclge_fd_get_tcpip4_tuple()
6265 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6266 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); in hclge_fd_get_tcpip4_tuple()
6268 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); in hclge_fd_get_tcpip4_tuple()
6269 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); in hclge_fd_get_tcpip4_tuple()
6271 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); in hclge_fd_get_tcpip4_tuple()
6272 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); in hclge_fd_get_tcpip4_tuple()
6274 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; in hclge_fd_get_tcpip4_tuple()
6275 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; in hclge_fd_get_tcpip4_tuple()
6277 rule->tuples.ether_proto = ETH_P_IP; in hclge_fd_get_tcpip4_tuple()
6278 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tcpip4_tuple()
6280 rule->tuples.ip_proto = ip_proto; in hclge_fd_get_tcpip4_tuple()
6281 rule->tuples_mask.ip_proto = 0xFF; in hclge_fd_get_tcpip4_tuple()
6287 rule->tuples.src_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6288 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); in hclge_fd_get_ip4_tuple()
6289 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6290 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); in hclge_fd_get_ip4_tuple()
6292 rule->tuples.dst_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6293 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); in hclge_fd_get_ip4_tuple()
6294 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6295 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); in hclge_fd_get_ip4_tuple()
6297 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; in hclge_fd_get_ip4_tuple()
6298 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; in hclge_fd_get_ip4_tuple()
6300 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; in hclge_fd_get_ip4_tuple()
6301 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; in hclge_fd_get_ip4_tuple()
6303 rule->tuples.ether_proto = ETH_P_IP; in hclge_fd_get_ip4_tuple()
6304 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_ip4_tuple()
6310 ipv6_addr_be32_to_cpu(rule->tuples.src_ip, in hclge_fd_get_tcpip6_tuple()
6311 fs->h_u.tcp_ip6_spec.ip6src); in hclge_fd_get_tcpip6_tuple()
6312 ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip, in hclge_fd_get_tcpip6_tuple()
6313 fs->m_u.tcp_ip6_spec.ip6src); in hclge_fd_get_tcpip6_tuple()
6315 ipv6_addr_be32_to_cpu(rule->tuples.dst_ip, in hclge_fd_get_tcpip6_tuple()
6316 fs->h_u.tcp_ip6_spec.ip6dst); in hclge_fd_get_tcpip6_tuple()
6317 ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip, in hclge_fd_get_tcpip6_tuple()
6318 fs->m_u.tcp_ip6_spec.ip6dst); in hclge_fd_get_tcpip6_tuple()
6320 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); in hclge_fd_get_tcpip6_tuple()
6321 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); in hclge_fd_get_tcpip6_tuple()
6323 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); in hclge_fd_get_tcpip6_tuple()
6324 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); in hclge_fd_get_tcpip6_tuple()
6326 rule->tuples.ether_proto = ETH_P_IPV6; in hclge_fd_get_tcpip6_tuple()
6327 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tcpip6_tuple()
6329 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; in hclge_fd_get_tcpip6_tuple()
6330 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; in hclge_fd_get_tcpip6_tuple()
6332 rule->tuples.ip_proto = ip_proto; in hclge_fd_get_tcpip6_tuple()
6333 rule->tuples_mask.ip_proto = 0xFF; in hclge_fd_get_tcpip6_tuple()
6339 ipv6_addr_be32_to_cpu(rule->tuples.src_ip, in hclge_fd_get_ip6_tuple()
6340 fs->h_u.usr_ip6_spec.ip6src); in hclge_fd_get_ip6_tuple()
6341 ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip, in hclge_fd_get_ip6_tuple()
6342 fs->m_u.usr_ip6_spec.ip6src); in hclge_fd_get_ip6_tuple()
6344 ipv6_addr_be32_to_cpu(rule->tuples.dst_ip, in hclge_fd_get_ip6_tuple()
6345 fs->h_u.usr_ip6_spec.ip6dst); in hclge_fd_get_ip6_tuple()
6346 ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip, in hclge_fd_get_ip6_tuple()
6347 fs->m_u.usr_ip6_spec.ip6dst); in hclge_fd_get_ip6_tuple()
6349 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; in hclge_fd_get_ip6_tuple()
6350 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; in hclge_fd_get_ip6_tuple()
6352 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; in hclge_fd_get_ip6_tuple()
6353 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; in hclge_fd_get_ip6_tuple()
6355 rule->tuples.ether_proto = ETH_P_IPV6; in hclge_fd_get_ip6_tuple()
6356 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_ip6_tuple()
6362 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); in hclge_fd_get_ether_tuple()
6363 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); in hclge_fd_get_ether_tuple()
6365 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); in hclge_fd_get_ether_tuple()
6366 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); in hclge_fd_get_ether_tuple()
6368 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); in hclge_fd_get_ether_tuple()
6369 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); in hclge_fd_get_ether_tuple()
6375 switch (info->layer) { in hclge_fd_get_user_def_tuple()
6377 rule->tuples.l2_user_def = info->data; in hclge_fd_get_user_def_tuple()
6378 rule->tuples_mask.l2_user_def = info->data_mask; in hclge_fd_get_user_def_tuple()
6381 rule->tuples.l3_user_def = info->data; in hclge_fd_get_user_def_tuple()
6382 rule->tuples_mask.l3_user_def = info->data_mask; in hclge_fd_get_user_def_tuple()
6385 rule->tuples.l4_user_def = (u32)info->data << 16; in hclge_fd_get_user_def_tuple()
6386 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; in hclge_fd_get_user_def_tuple()
6392 rule->ep.user_def = *info; in hclge_fd_get_user_def_tuple()
6399 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_get_tuple()
6430 return -EOPNOTSUPP; in hclge_fd_get_tuple()
6433 if (fs->flow_type & FLOW_EXT) { in hclge_fd_get_tuple()
6434 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); in hclge_fd_get_tuple()
6435 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); in hclge_fd_get_tuple()
6439 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_get_tuple()
6440 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); in hclge_fd_get_tuple()
6441 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); in hclge_fd_get_tuple()
6464 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6466 if (hdev->fd_active_type != rule->rule_type && in hclge_add_fd_entry_common()
6467 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_add_fd_entry_common()
6468 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { in hclge_add_fd_entry_common()
6469 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry_common()
6471 rule->rule_type, hdev->fd_active_type); in hclge_add_fd_entry_common()
6472 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6473 return -EINVAL; in hclge_add_fd_entry_common()
6488 rule->state = HCLGE_FD_ACTIVE; in hclge_add_fd_entry_common()
6489 hdev->fd_active_type = rule->rule_type; in hclge_add_fd_entry_common()
6490 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_common()
6493 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6500 struct hclge_dev *hdev = vport->back; in hclge_is_cls_flower_active()
6502 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; in hclge_is_cls_flower_active()
6508 struct hclge_vport *vport = hdev->vport; in hclge_fd_parse_ring_cookie()
6520 if (vf > hdev->num_req_vfs) { in hclge_fd_parse_ring_cookie()
6521 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6523 vf - 1U, hdev->num_req_vfs); in hclge_fd_parse_ring_cookie()
6524 return -EINVAL; in hclge_fd_parse_ring_cookie()
6527 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; in hclge_fd_parse_ring_cookie()
6528 tqps = hdev->vport[vf].nic.kinfo.num_tqps; in hclge_fd_parse_ring_cookie()
6531 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6533 ring, tqps - 1U); in hclge_fd_parse_ring_cookie()
6534 return -EINVAL; in hclge_fd_parse_ring_cookie()
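/* The ethtool ring_cookie packs an optional VF index together with a queue
 * id; vf 0 targets the PF itself and the queue must be below the target
 * vport's TQP count, while RX_CLS_FLOW_DISC maps to the drop action
 * instead (see hclge_fd_get_ring_cookie()).  Illustrative usage only,
 * exact syntax depending on the ethtool version:
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3
 *   ethtool -N eth0 flow-type udp4 dst-port 4789 vf 1 queue 2
 */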
6548 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry()
6557 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_fd_entry()
6558 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6560 return -EOPNOTSUPP; in hclge_add_fd_entry()
6563 if (!hdev->fd_en) { in hclge_add_fd_entry()
6564 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6566 return -EOPNOTSUPP; in hclge_add_fd_entry()
6569 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_add_fd_entry()
6575 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, in hclge_add_fd_entry()
6582 return -ENOMEM; in hclge_add_fd_entry()
6590 rule->flow_type = fs->flow_type; in hclge_add_fd_entry()
6591 rule->location = fs->location; in hclge_add_fd_entry()
6592 rule->unused_tuple = unused; in hclge_add_fd_entry()
6593 rule->vf_id = dst_vport_id; in hclge_add_fd_entry()
6594 rule->queue_id = q_index; in hclge_add_fd_entry()
6595 rule->action = action; in hclge_add_fd_entry()
6596 rule->rule_type = HCLGE_FD_EP_ACTIVE; in hclge_add_fd_entry()
6609 struct hclge_dev *hdev = vport->back; in hclge_del_fd_entry()
6613 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_fd_entry()
6614 return -EOPNOTSUPP; in hclge_del_fd_entry()
6616 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_del_fd_entry()
6618 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_fd_entry()
6619 return -EINVAL; in hclge_del_fd_entry()
6621 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6622 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_del_fd_entry()
6623 !test_bit(fs->location, hdev->fd_bmap)) { in hclge_del_fd_entry()
6624 dev_err(&hdev->pdev->dev, in hclge_del_fd_entry()
6625 "Delete fail, rule %u is inexistent\n", fs->location); in hclge_del_fd_entry()
6626 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6627 return -ENOENT; in hclge_del_fd_entry()
6630 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, in hclge_del_fd_entry()
6635 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); in hclge_del_fd_entry()
6638 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6649 spin_lock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6651 for_each_set_bit(location, hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6652 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_clear_fd_rules_in_list()
6657 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, in hclge_clear_fd_rules_in_list()
6659 hlist_del(&rule->rule_node); in hclge_clear_fd_rules_in_list()
6662 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_clear_fd_rules_in_list()
6663 hdev->hclge_fd_rule_num = 0; in hclge_clear_fd_rules_in_list()
6664 bitmap_zero(hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6665 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_clear_fd_rules_in_list()
6668 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6673 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_all_fd_entries()
6683 struct hclge_dev *hdev = vport->back; in hclge_restore_fd_entries()
6691 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_restore_fd_entries()
6695 if (!hdev->fd_en) in hclge_restore_fd_entries()
6698 spin_lock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6699 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_restore_fd_entries()
6700 if (rule->state == HCLGE_FD_ACTIVE) in hclge_restore_fd_entries()
6701 rule->state = HCLGE_FD_TO_ADD; in hclge_restore_fd_entries()
6703 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6704 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_restore_fd_entries()
6713 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_cnt()
6715 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) in hclge_get_fd_rule_cnt()
6716 return -EOPNOTSUPP; in hclge_get_fd_rule_cnt()
6718 cmd->rule_cnt = hdev->hclge_fd_rule_num; in hclge_get_fd_rule_cnt()
6719 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_fd_rule_cnt()
6728 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6729 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? in hclge_fd_get_tcpip4_info()
6730 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6732 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6733 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? in hclge_fd_get_tcpip4_info()
6734 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6736 spec->psrc = cpu_to_be16(rule->tuples.src_port); in hclge_fd_get_tcpip4_info()
6737 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? in hclge_fd_get_tcpip4_info()
6738 0 : cpu_to_be16(rule->tuples_mask.src_port); in hclge_fd_get_tcpip4_info()
6740 spec->pdst = cpu_to_be16(rule->tuples.dst_port); in hclge_fd_get_tcpip4_info()
6741 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? in hclge_fd_get_tcpip4_info()
6742 0 : cpu_to_be16(rule->tuples_mask.dst_port); in hclge_fd_get_tcpip4_info()
6744 spec->tos = rule->tuples.ip_tos; in hclge_fd_get_tcpip4_info()
6745 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_tcpip4_info()
6746 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_tcpip4_info()
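/* When a rule is read back for ethtool, every tuple flagged in
 * rule->unused_tuple is reported with an all-zero mask (wildcard); the
 * same pattern repeats in the ip4/tcpip6/ip6/ether helpers below.
 */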
6753 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6754 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? in hclge_fd_get_ip4_info()
6755 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6757 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6758 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? in hclge_fd_get_ip4_info()
6759 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6761 spec->tos = rule->tuples.ip_tos; in hclge_fd_get_ip4_info()
6762 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_ip4_info()
6763 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_ip4_info()
6765 spec->proto = rule->tuples.ip_proto; in hclge_fd_get_ip4_info()
6766 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? in hclge_fd_get_ip4_info()
6767 0 : rule->tuples_mask.ip_proto; in hclge_fd_get_ip4_info()
6769 spec->ip_ver = ETH_RX_NFC_IP4; in hclge_fd_get_ip4_info()
6776 ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip); in hclge_fd_get_tcpip6_info()
6777 ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip); in hclge_fd_get_tcpip6_info()
6778 if (rule->unused_tuple & BIT(INNER_SRC_IP)) in hclge_fd_get_tcpip6_info()
6779 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); in hclge_fd_get_tcpip6_info()
6781 ipv6_addr_cpu_to_be32(spec_mask->ip6src, in hclge_fd_get_tcpip6_info()
6782 rule->tuples_mask.src_ip); in hclge_fd_get_tcpip6_info()
6784 if (rule->unused_tuple & BIT(INNER_DST_IP)) in hclge_fd_get_tcpip6_info()
6785 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); in hclge_fd_get_tcpip6_info()
6787 ipv6_addr_cpu_to_be32(spec_mask->ip6dst, in hclge_fd_get_tcpip6_info()
6788 rule->tuples_mask.dst_ip); in hclge_fd_get_tcpip6_info()
6790 spec->tclass = rule->tuples.ip_tos; in hclge_fd_get_tcpip6_info()
6791 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_tcpip6_info()
6792 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_tcpip6_info()
6794 spec->psrc = cpu_to_be16(rule->tuples.src_port); in hclge_fd_get_tcpip6_info()
6795 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? in hclge_fd_get_tcpip6_info()
6796 0 : cpu_to_be16(rule->tuples_mask.src_port); in hclge_fd_get_tcpip6_info()
6798 spec->pdst = cpu_to_be16(rule->tuples.dst_port); in hclge_fd_get_tcpip6_info()
6799 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? in hclge_fd_get_tcpip6_info()
6800 0 : cpu_to_be16(rule->tuples_mask.dst_port); in hclge_fd_get_tcpip6_info()
6807 ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip); in hclge_fd_get_ip6_info()
6808 ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip); in hclge_fd_get_ip6_info()
6809 if (rule->unused_tuple & BIT(INNER_SRC_IP)) in hclge_fd_get_ip6_info()
6810 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); in hclge_fd_get_ip6_info()
6812 ipv6_addr_cpu_to_be32(spec_mask->ip6src, in hclge_fd_get_ip6_info()
6813 rule->tuples_mask.src_ip); in hclge_fd_get_ip6_info()
6815 if (rule->unused_tuple & BIT(INNER_DST_IP)) in hclge_fd_get_ip6_info()
6816 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); in hclge_fd_get_ip6_info()
6818 ipv6_addr_cpu_to_be32(spec_mask->ip6dst, in hclge_fd_get_ip6_info()
6819 rule->tuples_mask.dst_ip); in hclge_fd_get_ip6_info()
6821 spec->tclass = rule->tuples.ip_tos; in hclge_fd_get_ip6_info()
6822 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_ip6_info()
6823 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_ip6_info()
6825 spec->l4_proto = rule->tuples.ip_proto; in hclge_fd_get_ip6_info()
6826 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? in hclge_fd_get_ip6_info()
6827 0 : rule->tuples_mask.ip_proto; in hclge_fd_get_ip6_info()
6834 ether_addr_copy(spec->h_source, rule->tuples.src_mac); in hclge_fd_get_ether_info()
6835 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); in hclge_fd_get_ether_info()
6837 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) in hclge_fd_get_ether_info()
6838 eth_zero_addr(spec_mask->h_source); in hclge_fd_get_ether_info()
6840 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); in hclge_fd_get_ether_info()
6842 if (rule->unused_tuple & BIT(INNER_DST_MAC)) in hclge_fd_get_ether_info()
6843 eth_zero_addr(spec_mask->h_dest); in hclge_fd_get_ether_info()
6845 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); in hclge_fd_get_ether_info()
6847 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); in hclge_fd_get_ether_info()
6848 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? in hclge_fd_get_ether_info()
6849 0 : cpu_to_be16(rule->tuples_mask.ether_proto); in hclge_fd_get_ether_info()
6855 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == in hclge_fd_get_user_def_info()
6857 fs->h_ext.data[0] = 0; in hclge_fd_get_user_def_info()
6858 fs->h_ext.data[1] = 0; in hclge_fd_get_user_def_info()
6859 fs->m_ext.data[0] = 0; in hclge_fd_get_user_def_info()
6860 fs->m_ext.data[1] = 0; in hclge_fd_get_user_def_info()
6862 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); in hclge_fd_get_user_def_info()
6863 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); in hclge_fd_get_user_def_info()
6864 fs->m_ext.data[0] = in hclge_fd_get_user_def_info()
6866 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); in hclge_fd_get_user_def_info()
6873 if (fs->flow_type & FLOW_EXT) { in hclge_fd_get_ext_info()
6874 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); in hclge_fd_get_ext_info()
6875 fs->m_ext.vlan_tci = in hclge_fd_get_ext_info()
6876 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? in hclge_fd_get_ext_info()
6877 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); in hclge_fd_get_ext_info()
6882 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_get_ext_info()
6883 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); in hclge_fd_get_ext_info()
6884 if (rule->unused_tuple & BIT(INNER_DST_MAC)) in hclge_fd_get_ext_info()
6885 eth_zero_addr(fs->m_u.ether_spec.h_dest); in hclge_fd_get_ext_info()
6887 ether_addr_copy(fs->m_u.ether_spec.h_dest, in hclge_fd_get_ext_info()
6888 rule->tuples_mask.dst_mac); in hclge_fd_get_ext_info()
6898 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_get_fd_rule()
6899 if (rule->location == location) in hclge_get_fd_rule()
6901 else if (rule->location > location) in hclge_get_fd_rule()
6911 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { in hclge_fd_get_ring_cookie()
6912 fs->ring_cookie = RX_CLS_FLOW_DISC; in hclge_fd_get_ring_cookie()
6916 fs->ring_cookie = rule->queue_id; in hclge_fd_get_ring_cookie()
6917 vf_id = rule->vf_id; in hclge_fd_get_ring_cookie()
6919 fs->ring_cookie |= vf_id; in hclge_fd_get_ring_cookie()
6928 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_info()
6931 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_fd_rule_info()
6932 return -EOPNOTSUPP; in hclge_get_fd_rule_info()
6934 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_get_fd_rule_info()
6936 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6938 rule = hclge_get_fd_rule(hdev, fs->location); in hclge_get_fd_rule_info()
6940 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6941 return -ENOENT; in hclge_get_fd_rule_info()
6944 fs->flow_type = rule->flow_type; in hclge_get_fd_rule_info()
6945 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { in hclge_get_fd_rule_info()
6949 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, in hclge_get_fd_rule_info()
6950 &fs->m_u.tcp_ip4_spec); in hclge_get_fd_rule_info()
6953 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, in hclge_get_fd_rule_info()
6954 &fs->m_u.usr_ip4_spec); in hclge_get_fd_rule_info()
6959 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, in hclge_get_fd_rule_info()
6960 &fs->m_u.tcp_ip6_spec); in hclge_get_fd_rule_info()
6963 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, in hclge_get_fd_rule_info()
6964 &fs->m_u.usr_ip6_spec); in hclge_get_fd_rule_info()
6971 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, in hclge_get_fd_rule_info()
6972 &fs->m_u.ether_spec); in hclge_get_fd_rule_info()
6980 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6989 struct hclge_dev *hdev = vport->back; in hclge_get_all_rules()
6994 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_all_rules()
6995 return -EOPNOTSUPP; in hclge_get_all_rules()
6997 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_all_rules()
6999 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7001 &hdev->fd_rule_list, rule_node) { in hclge_get_all_rules()
7002 if (cnt == cmd->rule_cnt) { in hclge_get_all_rules()
7003 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7004 return -EMSGSIZE; in hclge_get_all_rules()
7007 if (rule->state == HCLGE_FD_TO_DEL) in hclge_get_all_rules()
7010 rule_locs[cnt] = rule->location; in hclge_get_all_rules()
7014 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7016 cmd->rule_cnt = cnt; in hclge_get_all_rules()
7022 struct hclge_fd_rule_tuples *tuples) in hclge_fd_get_flow_tuples() argument
7024 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 in hclge_fd_get_flow_tuples()
7025 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 in hclge_fd_get_flow_tuples()
7027 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); in hclge_fd_get_flow_tuples()
7028 tuples->ip_proto = fkeys->basic.ip_proto; in hclge_fd_get_flow_tuples()
7029 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); in hclge_fd_get_flow_tuples()
7031 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { in hclge_fd_get_flow_tuples()
7032 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); in hclge_fd_get_flow_tuples()
7033 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); in hclge_fd_get_flow_tuples()
7038 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); in hclge_fd_get_flow_tuples()
7039 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); in hclge_fd_get_flow_tuples()
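/* IPv4 addresses live in the last 32-bit word (index 3) of the four-word
 * src_ip/dst_ip arrays, so IPv4 and IPv6 flows share one tuple layout and
 * can be compared with a single memcmp() in hclge_fd_search_flow_keys().
 */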
7044 /* traverse all rules, check whether an existing rule has the same tuples */
7047 const struct hclge_fd_rule_tuples *tuples) in hclge_fd_search_flow_keys() argument
7052 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_fd_search_flow_keys()
7053 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) in hclge_fd_search_flow_keys()
7060 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, in hclge_fd_build_arfs_rule() argument
7063 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | in hclge_fd_build_arfs_rule()
7066 rule->action = 0; in hclge_fd_build_arfs_rule()
7067 rule->vf_id = 0; in hclge_fd_build_arfs_rule()
7068 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; in hclge_fd_build_arfs_rule()
7069 rule->state = HCLGE_FD_TO_ADD; in hclge_fd_build_arfs_rule()
7070 if (tuples->ether_proto == ETH_P_IP) { in hclge_fd_build_arfs_rule()
7071 if (tuples->ip_proto == IPPROTO_TCP) in hclge_fd_build_arfs_rule()
7072 rule->flow_type = TCP_V4_FLOW; in hclge_fd_build_arfs_rule()
7074 rule->flow_type = UDP_V4_FLOW; in hclge_fd_build_arfs_rule()
7076 if (tuples->ip_proto == IPPROTO_TCP) in hclge_fd_build_arfs_rule()
7077 rule->flow_type = TCP_V6_FLOW; in hclge_fd_build_arfs_rule()
7079 rule->flow_type = UDP_V6_FLOW; in hclge_fd_build_arfs_rule()
7081 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); in hclge_fd_build_arfs_rule()
7082 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); in hclge_fd_build_arfs_rule()
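/* aRFS rules are exact-match: all tuple masks are set to ones, the MAC
 * tuples (and the other fields not taken from the flow keys) are marked
 * unused, and flow_type is derived from the flow's n_proto/ip_proto pair.
 */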
7090 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry_by_arfs()
7094 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_add_fd_entry_by_arfs()
7095 return -EOPNOTSUPP; in hclge_add_fd_entry_by_arfs()
7100 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7101 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && in hclge_add_fd_entry_by_arfs()
7102 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { in hclge_add_fd_entry_by_arfs()
7103 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7104 return -EOPNOTSUPP; in hclge_add_fd_entry_by_arfs()
7116 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); in hclge_add_fd_entry_by_arfs()
7117 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_add_fd_entry_by_arfs()
7118 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7119 return -ENOSPC; in hclge_add_fd_entry_by_arfs()
7124 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7125 return -ENOMEM; in hclge_add_fd_entry_by_arfs()
7128 rule->location = bit_id; in hclge_add_fd_entry_by_arfs()
7129 rule->arfs.flow_id = flow_id; in hclge_add_fd_entry_by_arfs()
7130 rule->queue_id = queue_id; in hclge_add_fd_entry_by_arfs()
7132 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_by_arfs()
7133 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; in hclge_add_fd_entry_by_arfs()
7134 } else if (rule->queue_id != queue_id) { in hclge_add_fd_entry_by_arfs()
7135 rule->queue_id = queue_id; in hclge_add_fd_entry_by_arfs()
7136 rule->state = HCLGE_FD_TO_ADD; in hclge_add_fd_entry_by_arfs()
7137 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_add_fd_entry_by_arfs()
7140 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7141 return rule->location; in hclge_add_fd_entry_by_arfs()
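/* This is the aRFS (accelerated RFS) entry point, typically reached from
 * the ndo_rx_flow_steer path when CONFIG_RFS_ACCEL is enabled.  The
 * returned location doubles as the filter id the stack later hands to
 * rps_may_expire_flow(); actual TCAM programming is deferred to the
 * periodic service task via HCLGE_STATE_FD_TBL_CHANGED.
 */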
7147 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_rfs_filter_expire()
7151 spin_lock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7152 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { in hclge_rfs_filter_expire()
7153 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7156 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_rfs_filter_expire()
7157 if (rule->state != HCLGE_FD_ACTIVE) in hclge_rfs_filter_expire()
7159 if (rps_may_expire_flow(handle->netdev, rule->queue_id, in hclge_rfs_filter_expire()
7160 rule->arfs.flow_id, rule->location)) { in hclge_rfs_filter_expire()
7161 rule->state = HCLGE_FD_TO_DEL; in hclge_rfs_filter_expire()
7162 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_rfs_filter_expire()
7165 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
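/* Expired aRFS flows are only marked TO_DEL here; the hardware entry is
 * removed later by hclge_sync_fd_list() when the service task runs.
 */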
7177 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) in hclge_clear_arfs_rules()
7180 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_clear_arfs_rules()
7181 switch (rule->state) { in hclge_clear_arfs_rules()
7185 rule->location, NULL, false); in hclge_clear_arfs_rules()
7190 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_clear_arfs_rules()
7191 hlist_del(&rule->rule_node); in hclge_clear_arfs_rules()
7212 ethtype_key = ntohs(match.key->n_proto); in hclge_get_cls_key_basic()
7213 ethtype_mask = ntohs(match.mask->n_proto); in hclge_get_cls_key_basic()
7219 rule->tuples.ether_proto = ethtype_key; in hclge_get_cls_key_basic()
7220 rule->tuples_mask.ether_proto = ethtype_mask; in hclge_get_cls_key_basic()
7221 rule->tuples.ip_proto = match.key->ip_proto; in hclge_get_cls_key_basic()
7222 rule->tuples_mask.ip_proto = match.mask->ip_proto; in hclge_get_cls_key_basic()
7224 rule->unused_tuple |= BIT(INNER_IP_PROTO); in hclge_get_cls_key_basic()
7225 rule->unused_tuple |= BIT(INNER_ETH_TYPE); in hclge_get_cls_key_basic()
7236 ether_addr_copy(rule->tuples.dst_mac, match.key->dst); in hclge_get_cls_key_mac()
7237 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); in hclge_get_cls_key_mac()
7238 ether_addr_copy(rule->tuples.src_mac, match.key->src); in hclge_get_cls_key_mac()
7239 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); in hclge_get_cls_key_mac()
7241 rule->unused_tuple |= BIT(INNER_DST_MAC); in hclge_get_cls_key_mac()
7242 rule->unused_tuple |= BIT(INNER_SRC_MAC); in hclge_get_cls_key_mac()
7253 rule->tuples.vlan_tag1 = match.key->vlan_id | in hclge_get_cls_key_vlan()
7254 (match.key->vlan_priority << VLAN_PRIO_SHIFT); in hclge_get_cls_key_vlan()
7255 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | in hclge_get_cls_key_vlan()
7256 (match.mask->vlan_priority << VLAN_PRIO_SHIFT); in hclge_get_cls_key_vlan()
7258 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); in hclge_get_cls_key_vlan()
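/* vlan_tag1 packs the 3-bit PCP above the 12-bit VLAN id, i.e.
 * tag = (prio << VLAN_PRIO_SHIFT) | vid; for example priority 5 on
 * VLAN 100 yields (5 << 13) | 100 = 0xA064.
 */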
7272 addr_type = match.key->addr_type; in hclge_get_cls_key_ip()
7274 if (flow_rule_has_control_flags(match.mask->flags, extack)) in hclge_get_cls_key_ip()
7275 return -EOPNOTSUPP; in hclge_get_cls_key_ip()
7282 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); in hclge_get_cls_key_ip()
7283 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_get_cls_key_ip()
7284 be32_to_cpu(match.mask->src); in hclge_get_cls_key_ip()
7285 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); in hclge_get_cls_key_ip()
7286 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_get_cls_key_ip()
7287 be32_to_cpu(match.mask->dst); in hclge_get_cls_key_ip()
7292 ipv6_addr_be32_to_cpu(rule->tuples.src_ip, in hclge_get_cls_key_ip()
7293 match.key->src.s6_addr32); in hclge_get_cls_key_ip()
7294 ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip, in hclge_get_cls_key_ip()
7295 match.mask->src.s6_addr32); in hclge_get_cls_key_ip()
7296 ipv6_addr_be32_to_cpu(rule->tuples.dst_ip, in hclge_get_cls_key_ip()
7297 match.key->dst.s6_addr32); in hclge_get_cls_key_ip()
7298 ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip, in hclge_get_cls_key_ip()
7299 match.mask->dst.s6_addr32); in hclge_get_cls_key_ip()
7301 rule->unused_tuple |= BIT(INNER_SRC_IP); in hclge_get_cls_key_ip()
7302 rule->unused_tuple |= BIT(INNER_DST_IP); in hclge_get_cls_key_ip()
7316 rule->tuples.src_port = be16_to_cpu(match.key->src); in hclge_get_cls_key_port()
7317 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); in hclge_get_cls_key_port()
7318 rule->tuples.dst_port = be16_to_cpu(match.key->dst); in hclge_get_cls_key_port()
7319 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); in hclge_get_cls_key_port()
7321 rule->unused_tuple |= BIT(INNER_SRC_PORT); in hclge_get_cls_key_port()
7322 rule->unused_tuple |= BIT(INNER_DST_PORT); in hclge_get_cls_key_port()
7331 struct netlink_ext_ack *extack = cls_flower->common.extack; in hclge_parse_cls_flower()
7332 struct flow_dissector *dissector = flow->match.dissector; in hclge_parse_cls_flower()
7335 if (dissector->used_keys & in hclge_parse_cls_flower()
7343 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", in hclge_parse_cls_flower()
7344 dissector->used_keys); in hclge_parse_cls_flower()
7345 return -EOPNOTSUPP; in hclge_parse_cls_flower()
7364 u32 prio = cls_flower->common.prio; in hclge_check_cls_flower()
7366 if (tc < 0 || tc > hdev->tc_max) { in hclge_check_cls_flower()
7367 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); in hclge_check_cls_flower()
7368 return -EINVAL; in hclge_check_cls_flower()
7372 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_check_cls_flower()
7373 dev_err(&hdev->pdev->dev, in hclge_check_cls_flower()
7375 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_check_cls_flower()
7376 return -EINVAL; in hclge_check_cls_flower()
7379 if (test_bit(prio - 1, hdev->fd_bmap)) { in hclge_check_cls_flower()
7380 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); in hclge_check_cls_flower()
7381 return -EINVAL; in hclge_check_cls_flower()
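/* A tc flower rule's priority maps directly to an FD location (prio - 1),
 * so priorities must lie in [1, rule_num] and be unique.  Illustrative
 * usage only (syntax depends on the iproute2 version):
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress prio 1 protocol ip flower \
 *           dst_ip 192.168.1.1 skip_sw hw_tc 2
 */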
7391 struct hclge_dev *hdev = vport->back; in hclge_add_cls_flower()
7395 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_cls_flower()
7396 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7398 return -EOPNOTSUPP; in hclge_add_cls_flower()
7403 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7410 return -ENOMEM; in hclge_add_cls_flower()
7418 rule->action = HCLGE_FD_ACTION_SELECT_TC; in hclge_add_cls_flower()
7419 rule->cls_flower.tc = tc; in hclge_add_cls_flower()
7420 rule->location = cls_flower->common.prio - 1; in hclge_add_cls_flower()
7421 rule->vf_id = 0; in hclge_add_cls_flower()
7422 rule->cls_flower.cookie = cls_flower->cookie; in hclge_add_cls_flower()
7423 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; in hclge_add_cls_flower()
7438 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_find_cls_flower()
7439 if (rule->cls_flower.cookie == cookie) in hclge_find_cls_flower()
7450 struct hclge_dev *hdev = vport->back; in hclge_del_cls_flower()
7454 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_cls_flower()
7455 return -EOPNOTSUPP; in hclge_del_cls_flower()
7457 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7459 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); in hclge_del_cls_flower()
7461 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7462 return -EINVAL; in hclge_del_cls_flower()
7465 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, in hclge_del_cls_flower()
7472 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); in hclge_del_cls_flower()
7473 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_del_cls_flower()
7474 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7478 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); in hclge_del_cls_flower()
7479 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7490 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) in hclge_sync_fd_list()
7493 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7496 switch (rule->state) { in hclge_sync_fd_list()
7501 rule->state = HCLGE_FD_ACTIVE; in hclge_sync_fd_list()
7505 rule->location, NULL, false); in hclge_sync_fd_list()
7508 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_sync_fd_list()
7518 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_sync_fd_list()
7520 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
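/* The rule list is a small state machine: TO_ADD entries are written to
 * the TCAM and become ACTIVE, TO_DEL entries are removed from hardware and
 * then dropped from the list; on any failure HCLGE_STATE_FD_TBL_CHANGED is
 * re-armed so the service task retries on its next pass.
 */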
7525 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_sync_fd_table()
7528 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { in hclge_sync_fd_table()
7529 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; in hclge_sync_fd_table()
7536 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); in hclge_sync_fd_table()
7542 struct hclge_dev *hdev = vport->back; in hclge_get_hw_reset_stat()
7544 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || in hclge_get_hw_reset_stat()
7545 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); in hclge_get_hw_reset_stat()
7551 struct hclge_dev *hdev = vport->back; in hclge_get_cmdq_stat()
7553 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_get_cmdq_stat()
7559 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_resetting()
7561 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_ae_dev_resetting()
7567 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_reset_cnt()
7569 return hdev->rst_stats.hw_reset_done_cnt; in hclge_ae_dev_reset_cnt()
7575 struct hclge_dev *hdev = vport->back; in hclge_enable_fd()
7577 hdev->fd_en = enable; in hclge_enable_fd()
7580 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); in hclge_enable_fd()
7589 #define HCLGE_LINK_STATUS_WAIT_CNT 3 in hclge_cfg_mac_mode()
7612 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); in hclge_cfg_mac_mode()
7614 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_mode()
7616 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_mode()
7640 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; in hclge_config_switch_param()
7641 req->func_id = cpu_to_le32(func_id); in hclge_config_switch_param()
7643 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7645 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7652 req->switch_param = (req->switch_param & param_mask) | switch_param; in hclge_config_switch_param()
7653 req->param_mask = param_mask; in hclge_config_switch_param()
7655 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7657 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7667 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_phy_link_status_wait()
7674 dev_err(&hdev->pdev->dev, in hclge_phy_link_status_wait()
7679 if (phydev->link == link_ret) in hclge_phy_link_status_wait()
7702 return -EBUSY; in hclge_mac_link_status_wait()
7731 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7733 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7739 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); in hclge_set_app_loopback()
7742 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); in hclge_set_app_loopback()
7744 /* 3 Configure MAC work mode with the loopback flag in hclge_set_app_loopback()
7748 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7750 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7777 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7779 return -ENOTSUPP; in hclge_cfg_common_loopback_cmd_send()
7782 req->mask = loop_mode_b; in hclge_cfg_common_loopback_cmd_send()
7784 req->enable = loop_mode_b; in hclge_cfg_common_loopback_cmd_send()
7786 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_cmd_send()
7788 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7811 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_wait()
7813 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_wait()
7819 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); in hclge_cfg_common_loopback_wait()
7821 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { in hclge_cfg_common_loopback_wait()
7822 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); in hclge_cfg_common_loopback_wait()
7823 return -EBUSY; in hclge_cfg_common_loopback_wait()
7824 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { in hclge_cfg_common_loopback_wait()
7825 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); in hclge_cfg_common_loopback_wait()
7826 return -EIO; in hclge_cfg_common_loopback_wait()
7857 dev_err(&hdev->pdev->dev, in hclge_set_common_loopback()
7868 if (!phydev->suspended) { in hclge_enable_phy_loopback()
7895 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_phy_loopback()
7902 return -ENOTSUPP; in hclge_set_phy_loopback()
7910 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7919 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7933 req->tqp_id = cpu_to_le16(tqp_id); in hclge_tqp_enable_cmd_send()
7934 req->stream_id = cpu_to_le16(stream_id); in hclge_tqp_enable_cmd_send()
7936 req->enable |= 1U << HCLGE_TQP_ENABLE_B; in hclge_tqp_enable_cmd_send()
7938 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_tqp_enable_cmd_send()
7944 struct hclge_dev *hdev = vport->back; in hclge_tqp_enable()
7948 for (i = 0; i < handle->kinfo.num_tqps; i++) { in hclge_tqp_enable()
7960 struct hclge_dev *hdev = vport->back; in hclge_set_loopback()
7968 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_set_loopback()
7991 ret = -ENOTSUPP; in hclge_set_loopback()
7992 dev_err(&hdev->pdev->dev, in hclge_set_loopback()
8002 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", in hclge_set_loopback()
8028 unsigned long last = hdev->serv_processed_cnt; in hclge_flush_link_update()
8031 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && in hclge_flush_link_update()
8033 last == hdev->serv_processed_cnt) in hclge_flush_link_update()
8040 struct hclge_dev *hdev = vport->back; in hclge_set_timer_task()
8046 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_set_timer_task()
8056 struct hclge_dev *hdev = vport->back; in hclge_ae_start()
8060 clear_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_start()
8061 hdev->hw.mac.link = 0; in hclge_ae_start()
8074 struct hclge_dev *hdev = vport->back; in hclge_ae_stop()
8076 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_stop()
8077 spin_lock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8079 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8084 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { in hclge_ae_stop()
8087 if (hdev->reset_type != HNAE3_FUNC_RESET && in hclge_ae_stop()
8088 hdev->reset_type != HNAE3_FLR_RESET) { in hclge_ae_stop()
8111 struct hclge_dev *hdev = vport->back; in hclge_vport_start()
8113 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); in hclge_vport_start()
8114 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_vport_start()
8115 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_vport_start()
8116 vport->last_active_jiffies = jiffies; in hclge_vport_start()
8117 vport->need_notify = 0; in hclge_vport_start()
8119 if (test_bit(vport->vport_id, hdev->vport_config_block)) { in hclge_vport_start()
8120 if (vport->vport_id) { in hclge_vport_start()
8128 clear_bit(vport->vport_id, hdev->vport_config_block); in hclge_vport_start()
8135 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); in hclge_vport_stop()
8136 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_vport_stop()
8137 vport->need_notify = 0; in hclge_vport_stop()
8158 struct hclge_dev *hdev = vport->back; in hclge_get_mac_vlan_cmd_status()
8161 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8164 return -EIO; in hclge_get_mac_vlan_cmd_status()
8172 return -ENOSPC; in hclge_get_mac_vlan_cmd_status()
8174 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8177 return -EIO; in hclge_get_mac_vlan_cmd_status()
8182 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8184 return -ENOENT; in hclge_get_mac_vlan_cmd_status()
8187 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8190 return -EIO; in hclge_get_mac_vlan_cmd_status()
8195 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8197 return -ENOENT; in hclge_get_mac_vlan_cmd_status()
8200 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8203 return -EIO; in hclge_get_mac_vlan_cmd_status()
8206 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8209 return -EINVAL; in hclge_get_mac_vlan_cmd_status()
8220 return -EIO; in hclge_update_desc_vfid()
8230 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; in hclge_update_desc_vfid()
8243 #define HCLGE_DESC_NUMBER 3 in hclge_is_all_function_id_zero()
8259 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | in hclge_prepare_mac_addr()
8263 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); in hclge_prepare_mac_addr()
8265 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); in hclge_prepare_mac_addr()
8266 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); in hclge_prepare_mac_addr()
8269 new_req->mac_addr_hi32 = cpu_to_le32(high_val); in hclge_prepare_mac_addr()
8270 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); in hclge_prepare_mac_addr()
8276 struct hclge_dev *hdev = vport->back; in hclge_remove_mac_vlan_tbl()
8286 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_remove_mac_vlan_tbl()
8288 dev_err(&hdev->pdev->dev, in hclge_remove_mac_vlan_tbl()
8305 struct hclge_dev *hdev = vport->back; in hclge_lookup_mac_vlan_tbl()
8323 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_lookup_mac_vlan_tbl()
8328 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_lookup_mac_vlan_tbl()
8331 dev_err(&hdev->pdev->dev, in hclge_lookup_mac_vlan_tbl()
8347 struct hclge_dev *hdev = vport->back; in hclge_add_mac_vlan_tbl()
8361 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mac_vlan_tbl()
8377 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); in hclge_add_mac_vlan_tbl()
8387 dev_err(&hdev->pdev->dev, in hclge_add_mac_vlan_tbl()
8406 req->space_size = cpu_to_le32(space_size); in hclge_set_umv_space()
8408 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_umv_space()
8410 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", in hclge_set_umv_space()
8425 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); in hclge_init_umv_space()
8429 if (allocated_size < hdev->wanted_umv_size) in hclge_init_umv_space()
8430 dev_warn(&hdev->pdev->dev, in hclge_init_umv_space()
8432 hdev->wanted_umv_size, allocated_size); in hclge_init_umv_space()
8434 hdev->max_umv_size = allocated_size; in hclge_init_umv_space()
8435 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8436 hdev->share_umv_size = hdev->priv_umv_size + in hclge_init_umv_space()
8437 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8439 if (hdev->ae_dev->dev_specs.mc_mac_size) in hclge_init_umv_space()
8440 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); in hclge_init_umv_space()
8450 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_umv_space()
8451 vport = &hdev->vport[i]; in hclge_reset_umv_space()
8452 vport->used_umv_num = 0; in hclge_reset_umv_space()
8455 mutex_lock(&hdev->vport_lock); in hclge_reset_umv_space()
8456 hdev->share_umv_size = hdev->priv_umv_size + in hclge_reset_umv_space()
8457 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_reset_umv_space()
8458 mutex_unlock(&hdev->vport_lock); in hclge_reset_umv_space()
8460 hdev->used_mc_mac_num = 0; in hclge_reset_umv_space()
8465 struct hclge_dev *hdev = vport->back; in hclge_is_umv_space_full()
8469 mutex_lock(&hdev->vport_lock); in hclge_is_umv_space_full()
8471 is_full = (vport->used_umv_num >= hdev->priv_umv_size && in hclge_is_umv_space_full()
8472 hdev->share_umv_size == 0); in hclge_is_umv_space_full()
8475 mutex_unlock(&hdev->vport_lock); in hclge_is_umv_space_full()
8482 struct hclge_dev *hdev = vport->back; in hclge_update_umv_space()
8485 if (vport->used_umv_num > hdev->priv_umv_size) in hclge_update_umv_space()
8486 hdev->share_umv_size++; in hclge_update_umv_space()
8488 if (vport->used_umv_num > 0) in hclge_update_umv_space()
8489 vport->used_umv_num--; in hclge_update_umv_space()
8491 if (vport->used_umv_num >= hdev->priv_umv_size && in hclge_update_umv_space()
8492 hdev->share_umv_size > 0) in hclge_update_umv_space()
8493 hdev->share_umv_size--; in hclge_update_umv_space()
8494 vport->used_umv_num++; in hclge_update_umv_space()
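/* UMV (unicast MAC-VLAN table) space is split into a per-function private
 * quota (priv_umv_size) plus one shared pool: a vport consumes its private
 * quota first and only then borrows from share_umv_size, which is what the
 * comparisons above implement in both the free and alloc directions.
 */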
8504 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) in hclge_find_mac_node()
8516 if (mac_node->state == HCLGE_MAC_TO_DEL) in hclge_update_mac_node()
8517 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node()
8521 if (mac_node->state == HCLGE_MAC_TO_ADD) { in hclge_update_mac_node()
8522 list_del(&mac_node->node); in hclge_update_mac_node()
8525 mac_node->state = HCLGE_MAC_TO_DEL; in hclge_update_mac_node()
8528 /* only from tmp_add_list, the mac_node->state won't be in hclge_update_mac_node()
8532 if (mac_node->state == HCLGE_MAC_TO_ADD) in hclge_update_mac_node()
8533 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node()
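/* MAC address nodes move through TO_ADD / ACTIVE / TO_DEL; this helper only
 * reconciles list state, while the real MAC-VLAN table writes happen later
 * in hclge_sync_vport_mac_table() from the service task.
 */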
8545 struct hclge_dev *hdev = vport->back; in hclge_update_mac_list()
8550 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_update_mac_list()
8552 spin_lock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8561 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8562 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_list()
8568 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8570 dev_err(&hdev->pdev->dev, in hclge_update_mac_list()
8573 return -ENOENT; in hclge_update_mac_list()
8578 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8579 return -ENOMEM; in hclge_update_mac_list()
8582 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_list()
8584 mac_node->state = state; in hclge_update_mac_list()
8585 ether_addr_copy(mac_node->mac_addr, addr); in hclge_update_mac_list()
8586 list_add_tail(&mac_node->node, list); in hclge_update_mac_list()
8588 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8606 struct hclge_dev *hdev = vport->back; in hclge_add_uc_addr_common()
8617 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
8622 return -EINVAL; in hclge_add_uc_addr_common()
8628 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); in hclge_add_uc_addr_common()
8639 if (ret == -ENOENT) { in hclge_add_uc_addr_common()
8640 mutex_lock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8645 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8648 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8650 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) in hclge_add_uc_addr_common()
8651 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", in hclge_add_uc_addr_common()
8652 hdev->priv_umv_size); in hclge_add_uc_addr_common()
8654 return -ENOSPC; in hclge_add_uc_addr_common()
8659 return -EEXIST; in hclge_add_uc_addr_common()
8677 struct hclge_dev *hdev = vport->back; in hclge_rm_uc_addr_common()
8686 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", in hclge_rm_uc_addr_common()
8688 return -EINVAL; in hclge_rm_uc_addr_common()
8695 if (!ret || ret == -ENOENT) { in hclge_rm_uc_addr_common()
8696 mutex_lock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8698 mutex_unlock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8718 struct hclge_dev *hdev = vport->back; in hclge_add_mc_addr_common()
8720 struct hclge_desc desc[3]; in hclge_add_mc_addr_common()
8727 dev_err(&hdev->pdev->dev, in hclge_add_mc_addr_common()
8730 return -EINVAL; in hclge_add_mc_addr_common()
8736 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && in hclge_add_mc_addr_common()
8737 hdev->used_mc_mac_num >= in hclge_add_mc_addr_common()
8738 hdev->ae_dev->dev_specs.mc_mac_size) in hclge_add_mc_addr_common()
8748 status = hclge_update_desc_vfid(desc, vport->vport_id, false); in hclge_add_mc_addr_common()
8752 if (status == -ENOSPC) in hclge_add_mc_addr_common()
8755 hdev->used_mc_mac_num++; in hclge_add_mc_addr_common()
8761 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { in hclge_add_mc_addr_common()
8762 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; in hclge_add_mc_addr_common()
8763 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); in hclge_add_mc_addr_common()
8766 return -ENOSPC; in hclge_add_mc_addr_common()
8782 struct hclge_dev *hdev = vport->back; in hclge_rm_mc_addr_common()
8785 struct hclge_desc desc[3]; in hclge_rm_mc_addr_common()
8790 dev_dbg(&hdev->pdev->dev, in hclge_rm_mc_addr_common()
8793 return -EINVAL; in hclge_rm_mc_addr_common()
8801 status = hclge_update_desc_vfid(desc, vport->vport_id, true); in hclge_rm_mc_addr_common()
8809 hdev->used_mc_mac_num--; in hclge_rm_mc_addr_common()
8814 } else if (status == -ENOENT) { in hclge_rm_mc_addr_common()
8835 ret = sync(vport, mac_node->mac_addr); in hclge_sync_vport_mac_list()
8837 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_sync_vport_mac_list()
8840 &vport->state); in hclge_sync_vport_mac_list()
8850 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || in hclge_sync_vport_mac_list()
8851 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) in hclge_sync_vport_mac_list()
8871 ret = unsync(vport, mac_node->mac_addr); in hclge_unsync_vport_mac_list()
8872 if (!ret || ret == -ENOENT) { in hclge_unsync_vport_mac_list()
8873 list_del(&mac_node->node); in hclge_unsync_vport_mac_list()
8877 &vport->state); in hclge_unsync_vport_mac_list()
8890 if (mac_node->state == HCLGE_MAC_TO_ADD) in hclge_sync_from_add_list()
8901 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); in hclge_sync_from_add_list()
8903 hclge_update_mac_node(new_node, mac_node->state); in hclge_sync_from_add_list()
8904 list_del(&mac_node->node); in hclge_sync_from_add_list()
8906 } else if (mac_node->state == HCLGE_MAC_ACTIVE) { in hclge_sync_from_add_list()
8907 mac_node->state = HCLGE_MAC_TO_DEL; in hclge_sync_from_add_list()
8908 list_move_tail(&mac_node->node, mac_list); in hclge_sync_from_add_list()
8910 list_del(&mac_node->node); in hclge_sync_from_add_list()
8924 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); in hclge_sync_from_del_list()
8933 new_node->state = HCLGE_MAC_ACTIVE; in hclge_sync_from_del_list()
8934 list_del(&mac_node->node); in hclge_sync_from_del_list()
8937 list_move_tail(&mac_node->node, mac_list); in hclge_sync_from_del_list()
8948 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; in hclge_update_overflow_flags()
8950 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; in hclge_update_overflow_flags()
8953 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; in hclge_update_overflow_flags()
8955 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; in hclge_update_overflow_flags()
8974 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_sync_vport_mac_table()
8976 spin_lock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
8979 switch (mac_node->state) { in hclge_sync_vport_mac_table()
8981 list_move_tail(&mac_node->node, &tmp_del_list); in hclge_sync_vport_mac_table()
8987 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); in hclge_sync_vport_mac_table()
8988 new_node->state = mac_node->state; in hclge_sync_vport_mac_table()
8989 list_add_tail(&new_node->node, &tmp_add_list); in hclge_sync_vport_mac_table()
8997 spin_unlock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
9006 spin_lock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
9011 spin_unlock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
9018 struct hclge_dev *hdev = vport->back; in hclge_need_sync_mac_table()
9020 if (test_bit(vport->vport_id, hdev->vport_config_block)) in hclge_need_sync_mac_table()
9023 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) in hclge_need_sync_mac_table()
9033 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_mac_table()
9034 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_mac_table()
9051 switch (mac_cfg->state) { in hclge_build_del_list()
9054 list_move_tail(&mac_cfg->node, tmp_del_list); in hclge_build_del_list()
9058 list_del(&mac_cfg->node); in hclge_build_del_list()
9076 ret = unsync(vport, mac_cfg->mac_addr); in hclge_unsync_del_list()
9077 if (!ret || ret == -ENOENT) { in hclge_unsync_del_list()
9083 mac_cfg->state == HCLGE_MAC_ACTIVE) { in hclge_unsync_del_list()
9084 mac_cfg->state = HCLGE_MAC_TO_ADD; in hclge_unsync_del_list()
9086 list_del(&mac_cfg->node); in hclge_unsync_del_list()
9090 mac_cfg->state = HCLGE_MAC_TO_DEL; in hclge_unsync_del_list()
9099 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_mac_table()
9103 list = &vport->uc_mac_list; in hclge_rm_vport_all_mac_table()
9106 list = &vport->mc_mac_list; in hclge_rm_vport_all_mac_table()
9113 set_bit(vport->vport_id, hdev->vport_config_block); in hclge_rm_vport_all_mac_table()
9115 spin_lock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9119 spin_unlock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9123 spin_lock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9127 spin_unlock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9135 struct hclge_dev *hdev = vport->back; in hclge_uninit_vport_mac_list()
9141 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_uninit_vport_mac_list()
9143 spin_lock_bh(&vport->mac_list_lock); in hclge_uninit_vport_mac_list()
9146 switch (mac_node->state) { in hclge_uninit_vport_mac_list()
9149 list_move_tail(&mac_node->node, &tmp_del_list); in hclge_uninit_vport_mac_list()
9152 list_del(&mac_node->node); in hclge_uninit_vport_mac_list()
9158 spin_unlock_bh(&vport->mac_list_lock); in hclge_uninit_vport_mac_list()
9163 dev_warn(&hdev->pdev->dev, in hclge_uninit_vport_mac_list()
9166 vport->vport_id); in hclge_uninit_vport_mac_list()
9169 list_del(&mac_node->node); in hclge_uninit_vport_mac_list()
9179 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_mac_table()
9180 vport = &hdev->vport[i]; in hclge_uninit_mac_table()
9192 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 in hclge_get_mac_ethertype_cmd_status()
9197 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9200 return -EIO; in hclge_get_mac_ethertype_cmd_status()
9209 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9211 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
9214 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9216 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
9219 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9222 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
9233 struct hclge_dev *hdev = vport->back; in hclge_set_vf_mac()
9237 return -EINVAL; in hclge_set_vf_mac()
9240 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { in hclge_set_vf_mac()
9241 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9247 ether_addr_copy(vport->vf_info.mac, mac_addr); in hclge_set_vf_mac()
9253 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { in hclge_set_vf_mac()
9254 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9261 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9278 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mgr_tbl()
9280 dev_err(&hdev->pdev->dev, in hclge_add_mgr_tbl()
9300 dev_err(&hdev->pdev->dev, in init_mgr_tbl()
9313 struct hclge_dev *hdev = vport->back; in hclge_get_mac_addr()
9315 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclge_get_mac_addr()
9321 struct list_head *list = &vport->uc_mac_list; in hclge_update_mac_node_for_dev_addr()
9328 return -ENOMEM; in hclge_update_mac_node_for_dev_addr()
9330 new_node->state = HCLGE_MAC_TO_ADD; in hclge_update_mac_node_for_dev_addr()
9331 ether_addr_copy(new_node->mac_addr, new_addr); in hclge_update_mac_node_for_dev_addr()
9332 list_add(&new_node->node, list); in hclge_update_mac_node_for_dev_addr()
9334 if (new_node->state == HCLGE_MAC_TO_DEL) in hclge_update_mac_node_for_dev_addr()
9335 new_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node_for_dev_addr()
9338 * addr may not be re-added into the mac table for the umv space in hclge_update_mac_node_for_dev_addr()
9342 list_move(&new_node->node, list); in hclge_update_mac_node_for_dev_addr()
9348 if (old_node->state == HCLGE_MAC_TO_ADD) { in hclge_update_mac_node_for_dev_addr()
9349 list_del(&old_node->node); in hclge_update_mac_node_for_dev_addr()
9352 old_node->state = HCLGE_MAC_TO_DEL; in hclge_update_mac_node_for_dev_addr()
9357 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_node_for_dev_addr()
9368 struct hclge_dev *hdev = vport->back; in hclge_set_mac_addr()
9377 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9380 return -EINVAL; in hclge_set_mac_addr()
9385 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9392 old_addr = hdev->hw.mac.mac_addr; in hclge_set_mac_addr()
9394 spin_lock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
9398 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9401 spin_unlock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
9411 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); in hclge_set_mac_addr()
9412 spin_unlock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
9424 return -EOPNOTSUPP; in hclge_mii_ioctl()
9428 data->phy_id = hdev->hw.mac.phy_addr; in hclge_mii_ioctl()
9432 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); in hclge_mii_ioctl()
9436 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); in hclge_mii_ioctl()
9438 return -EOPNOTSUPP; in hclge_mii_ioctl()
9446 struct hclge_dev *hdev = vport->back; in hclge_do_ioctl()
9454 if (!hdev->hw.mac.phydev) in hclge_do_ioctl()
9458 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); in hclge_do_ioctl()
9470 req->vf_id = vf_id; in hclge_set_port_vlan_filter_bypass()
9471 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, in hclge_set_port_vlan_filter_bypass()
9474 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter_bypass()
9476 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter_bypass()
9493 req->vlan_type = vlan_type; in hclge_set_vlan_filter_ctrl()
9494 req->vf_id = vf_id; in hclge_set_vlan_filter_ctrl()
9496 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9498 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9505 req->vlan_fe = filter_en ? in hclge_set_vlan_filter_ctrl()
9506 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); in hclge_set_vlan_filter_ctrl()
9508 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9510 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9518 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_filter()
9519 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_set_vport_vlan_filter()
9522 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vport_vlan_filter()
9525 enable, vport->vport_id); in hclge_set_vport_vlan_filter()
9529 vport->vport_id); in hclge_set_vport_vlan_filter()
9533 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { in hclge_set_vport_vlan_filter()
9534 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, in hclge_set_vport_vlan_filter()
9536 } else if (!vport->vport_id) { in hclge_set_vport_vlan_filter()
9537 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) in hclge_set_vport_vlan_filter()
9550 struct hnae3_handle *handle = &vport->nic; in hclge_need_enable_vport_vlan_filter()
9552 struct hclge_dev *hdev = vport->back; in hclge_need_enable_vport_vlan_filter()
9554 if (vport->vport_id) { in hclge_need_enable_vport_vlan_filter()
9555 if (vport->port_base_vlan_cfg.state != in hclge_need_enable_vport_vlan_filter()
9559 if (vport->vf_info.trusted && vport->vf_info.request_uc_en) in hclge_need_enable_vport_vlan_filter()
9561 } else if (handle->netdev_flags & HNAE3_USER_UPE) { in hclge_need_enable_vport_vlan_filter()
9565 if (!vport->req_vlan_fltr_en) in hclge_need_enable_vport_vlan_filter()
9569 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_need_enable_vport_vlan_filter()
9572 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) in hclge_need_enable_vport_vlan_filter()
9573 if (vlan->vlan_id != 0) in hclge_need_enable_vport_vlan_filter()
9581 struct hclge_dev *hdev = vport->back; in hclge_enable_vport_vlan_filter()
9585 mutex_lock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9587 vport->req_vlan_fltr_en = request_en; in hclge_enable_vport_vlan_filter()
9590 if (need_en == vport->cur_vlan_fltr_en) { in hclge_enable_vport_vlan_filter()
9591 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9597 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9601 vport->cur_vlan_fltr_en = need_en; in hclge_enable_vport_vlan_filter()
9603 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9638 req0->vlan_id = cpu_to_le16(vlan); in hclge_set_vf_vlan_filter_cmd()
9639 req0->vlan_cfg = is_kill; in hclge_set_vf_vlan_filter_cmd()
9642 req0->vf_bitmap[vf_byte_off] = vf_byte_val; in hclge_set_vf_vlan_filter_cmd()
9644 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; in hclge_set_vf_vlan_filter_cmd()
9646 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_set_vf_vlan_filter_cmd()
9648 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter_cmd()
9666 if (!req->resp_code || req->resp_code == 1) in hclge_check_vf_vlan_cmd_status()
9669 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { in hclge_check_vf_vlan_cmd_status()
9670 set_bit(vfid, hdev->vf_vlan_full); in hclge_check_vf_vlan_cmd_status()
9671 dev_warn(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9676 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9678 req->resp_code); in hclge_check_vf_vlan_cmd_status()
9681 if (!req->resp_code) in hclge_check_vf_vlan_cmd_status()
9689 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) in hclge_check_vf_vlan_cmd_status()
9692 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9694 req->resp_code); in hclge_check_vf_vlan_cmd_status()
9697 return -EIO; in hclge_check_vf_vlan_cmd_status()
9703 struct hclge_vport *vport = &hdev->vport[vfid]; in hclge_set_vf_vlan_common()
9712 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { in hclge_set_vf_vlan_common()
9713 if (vport->vf_info.spoofchk && vlan) { in hclge_set_vf_vlan_common()
9714 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
9716 return -EPERM; in hclge_set_vf_vlan_common()
9746 req->vlan_offset = vlan_offset_160; in hclge_set_port_vlan_filter()
9747 req->vlan_cfg = is_kill; in hclge_set_port_vlan_filter()
9748 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; in hclge_set_port_vlan_filter()
9750 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter()
9752 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter()
9762 test_bit(vport_id, hdev->vlan_table[vlan_id])) in hclge_need_update_port_vlan()
9765 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9766 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9773 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9774 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9794 return -EINVAL; in hclge_set_vlan_filter_hw()
9798 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
9807 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) in hclge_set_vlan_filter_hw()
9819 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; in hclge_set_vlan_tx_offload_cfg()
9821 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_tx_offload_cfg()
9829 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); in hclge_set_vlan_tx_offload_cfg()
9830 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); in hclge_set_vlan_tx_offload_cfg()
9831 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, in hclge_set_vlan_tx_offload_cfg()
9832 vcfg->accept_tag1 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9833 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, in hclge_set_vlan_tx_offload_cfg()
9834 vcfg->accept_untag1 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9835 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, in hclge_set_vlan_tx_offload_cfg()
9836 vcfg->accept_tag2 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9837 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, in hclge_set_vlan_tx_offload_cfg()
9838 vcfg->accept_untag2 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9839 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, in hclge_set_vlan_tx_offload_cfg()
9840 vcfg->insert_tag1_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9841 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, in hclge_set_vlan_tx_offload_cfg()
9842 vcfg->insert_tag2_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9843 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, in hclge_set_vlan_tx_offload_cfg()
9844 vcfg->tag_shift_mode_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9845 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); in hclge_set_vlan_tx_offload_cfg()
9847 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; in hclge_set_vlan_tx_offload_cfg()
9848 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / in hclge_set_vlan_tx_offload_cfg()
9850 req->vf_bitmap[bmap_index] = in hclge_set_vlan_tx_offload_cfg()
9851 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); in hclge_set_vlan_tx_offload_cfg()
9853 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_tx_offload_cfg()
9855 dev_err(&hdev->pdev->dev, in hclge_set_vlan_tx_offload_cfg()
9864 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; in hclge_set_vlan_rx_offload_cfg()
9866 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_rx_offload_cfg()
9874 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
9875 vcfg->strip_tag1_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9876 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
9877 vcfg->strip_tag2_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9878 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
9879 vcfg->vlan1_vlan_prionly ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9880 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
9881 vcfg->vlan2_vlan_prionly ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9882 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
9883 vcfg->strip_tag1_discard_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9884 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
9885 vcfg->strip_tag2_discard_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9887 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; in hclge_set_vlan_rx_offload_cfg()
9888 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / in hclge_set_vlan_rx_offload_cfg()
9890 req->vf_bitmap[bmap_index] = in hclge_set_vlan_rx_offload_cfg()
9891 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); in hclge_set_vlan_rx_offload_cfg()
9893 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_rx_offload_cfg()
9895 dev_err(&hdev->pdev->dev, in hclge_set_vlan_rx_offload_cfg()
9909 vport->txvlan_cfg.accept_tag1 = true; in hclge_vlan_offload_cfg()
9910 vport->txvlan_cfg.insert_tag1_en = false; in hclge_vlan_offload_cfg()
9911 vport->txvlan_cfg.default_tag1 = 0; in hclge_vlan_offload_cfg()
9913 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); in hclge_vlan_offload_cfg()
9915 vport->txvlan_cfg.accept_tag1 = in hclge_vlan_offload_cfg()
9916 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; in hclge_vlan_offload_cfg()
9917 vport->txvlan_cfg.insert_tag1_en = true; in hclge_vlan_offload_cfg()
9918 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | in hclge_vlan_offload_cfg()
9922 vport->txvlan_cfg.accept_untag1 = true; in hclge_vlan_offload_cfg()
9928 vport->txvlan_cfg.accept_tag2 = true; in hclge_vlan_offload_cfg()
9929 vport->txvlan_cfg.accept_untag2 = true; in hclge_vlan_offload_cfg()
9930 vport->txvlan_cfg.insert_tag2_en = false; in hclge_vlan_offload_cfg()
9931 vport->txvlan_cfg.default_tag2 = 0; in hclge_vlan_offload_cfg()
9932 vport->txvlan_cfg.tag_shift_mode_en = true; in hclge_vlan_offload_cfg()
9935 vport->rxvlan_cfg.strip_tag1_en = false; in hclge_vlan_offload_cfg()
9936 vport->rxvlan_cfg.strip_tag2_en = in hclge_vlan_offload_cfg()
9937 vport->rxvlan_cfg.rx_vlan_offload_en; in hclge_vlan_offload_cfg()
9938 vport->rxvlan_cfg.strip_tag2_discard_en = false; in hclge_vlan_offload_cfg()
9940 vport->rxvlan_cfg.strip_tag1_en = in hclge_vlan_offload_cfg()
9941 vport->rxvlan_cfg.rx_vlan_offload_en; in hclge_vlan_offload_cfg()
9942 vport->rxvlan_cfg.strip_tag2_en = true; in hclge_vlan_offload_cfg()
9943 vport->rxvlan_cfg.strip_tag2_discard_en = true; in hclge_vlan_offload_cfg()
9946 vport->rxvlan_cfg.strip_tag1_discard_en = false; in hclge_vlan_offload_cfg()
9947 vport->rxvlan_cfg.vlan1_vlan_prionly = false; in hclge_vlan_offload_cfg()
9948 vport->rxvlan_cfg.vlan2_vlan_prionly = false; in hclge_vlan_offload_cfg()
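/*
 * Editor's note: a standalone sketch of the rx VLAN strip selection used
 * in hclge_vlan_offload_cfg() and hclge_en_hw_strip_rxvtag() above.
 * When no port-based VLAN is inserted, only tag2 stripping follows the
 * user's rx-offload setting; when a port-based VLAN is active, tag1
 * stripping follows the user setting and tag2 (the port VLAN) is always
 * stripped and discarded.  Field names mirror the driver, but the struct
 * is local to this example.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_vtag_cfg {
	bool strip_tag1_en;
	bool strip_tag2_en;
	bool strip_tag2_discard_en;
};

static void fill_rx_vtag_cfg(struct rx_vtag_cfg *cfg, bool port_vlan_active,
			     bool rx_offload_en)
{
	if (!port_vlan_active) {
		cfg->strip_tag1_en = false;
		cfg->strip_tag2_en = rx_offload_en;
		cfg->strip_tag2_discard_en = false;
	} else {
		cfg->strip_tag1_en = rx_offload_en;
		cfg->strip_tag2_en = true;
		cfg->strip_tag2_discard_en = true;
	}
}

int main(void)
{
	struct rx_vtag_cfg cfg;

	fill_rx_vtag_cfg(&cfg, true, false);
	printf("tag1=%d tag2=%d tag2_discard=%d\n",
	       cfg.strip_tag1_en, cfg.strip_tag2_en, cfg.strip_tag2_discard_en);
	return 0;
}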
9966 rx_req->ot_fst_vlan_type = in hclge_set_vlan_protocol_type()
9967 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); in hclge_set_vlan_protocol_type()
9968 rx_req->ot_sec_vlan_type = in hclge_set_vlan_protocol_type()
9969 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); in hclge_set_vlan_protocol_type()
9970 rx_req->in_fst_vlan_type = in hclge_set_vlan_protocol_type()
9971 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); in hclge_set_vlan_protocol_type()
9972 rx_req->in_sec_vlan_type = in hclge_set_vlan_protocol_type()
9973 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); in hclge_set_vlan_protocol_type()
9975 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9977 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9986 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); in hclge_set_vlan_protocol_type()
9987 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); in hclge_set_vlan_protocol_type()
9989 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9991 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
10005 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_init_vlan_filter()
10011 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_filter()
10012 vport = &hdev->vport[i]; in hclge_init_vlan_filter()
10015 vport->vport_id); in hclge_init_vlan_filter()
10018 vport->cur_vlan_fltr_en = true; in hclge_init_vlan_filter()
10021 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && in hclge_init_vlan_filter()
10022 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) in hclge_init_vlan_filter()
10031 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10032 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10033 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10034 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10035 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10036 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10048 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vport_vlan_offload()
10049 vport = &hdev->vport[i]; in hclge_init_vport_vlan_offload()
10050 cfg = &vport->port_base_vlan_cfg; in hclge_init_vport_vlan_offload()
10052 ret = hclge_vlan_offload_cfg(vport, cfg->state, in hclge_init_vport_vlan_offload()
10053 cfg->vlan_info.vlan_tag, in hclge_init_vport_vlan_offload()
10054 cfg->vlan_info.qos); in hclge_init_vport_vlan_offload()
10063 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_init_vlan_config()
10085 struct hclge_dev *hdev = vport->back; in hclge_add_vport_vlan_table()
10087 mutex_lock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10089 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_add_vport_vlan_table()
10090 if (vlan->vlan_id == vlan_id) { in hclge_add_vport_vlan_table()
10091 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10098 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10102 vlan->hd_tbl_status = writen_to_tbl; in hclge_add_vport_vlan_table()
10103 vlan->vlan_id = vlan_id; in hclge_add_vport_vlan_table()
10105 list_add_tail(&vlan->node, &vport->vlan_list); in hclge_add_vport_vlan_table()
10106 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
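/*
 * Editor's note: an illustrative, standalone model of the locked,
 * de-duplicated list insert performed by hclge_add_vport_vlan_table()
 * above: the vport lock is held while the list is searched, and a new
 * node is only appended when the VLAN ID is not already tracked.  This
 * sketch uses pthreads and a plain singly linked list instead of the
 * kernel's mutex and list_head.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct vlan_node {
	unsigned short vlan_id;
	bool written_to_tbl;
	struct vlan_node *next;
};

static struct vlan_node *vlan_list;
static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;

static int add_vport_vlan(unsigned short vlan_id, bool written_to_tbl)
{
	struct vlan_node *node;

	pthread_mutex_lock(&vport_lock);
	for (node = vlan_list; node; node = node->next) {
		if (node->vlan_id == vlan_id) {	/* already tracked */
			pthread_mutex_unlock(&vport_lock);
			return 0;
		}
	}

	node = calloc(1, sizeof(*node));
	if (!node) {
		pthread_mutex_unlock(&vport_lock);
		return -1;
	}
	node->vlan_id = vlan_id;
	node->written_to_tbl = written_to_tbl;
	node->next = vlan_list;	/* order does not matter for this sketch */
	vlan_list = node;
	pthread_mutex_unlock(&vport_lock);
	return 0;
}

int main(void)
{
	add_vport_vlan(100, true);
	add_vport_vlan(100, true);	/* duplicate, ignored */
	printf("head vlan: %u\n", vlan_list->vlan_id);
	return 0;
}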
10112 struct hclge_dev *hdev = vport->back; in hclge_add_vport_all_vlan_table()
10115 mutex_lock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10117 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_add_vport_all_vlan_table()
10118 if (!vlan->hd_tbl_status) { in hclge_add_vport_all_vlan_table()
10120 vport->vport_id, in hclge_add_vport_all_vlan_table()
10121 vlan->vlan_id, false); in hclge_add_vport_all_vlan_table()
10123 dev_err(&hdev->pdev->dev, in hclge_add_vport_all_vlan_table()
10127 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10131 vlan->hd_tbl_status = true; in hclge_add_vport_all_vlan_table()
10134 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10143 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_vlan_table()
10145 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_rm_vport_vlan_table()
10146 if (vlan->vlan_id == vlan_id) { in hclge_rm_vport_vlan_table()
10147 if (is_write_tbl && vlan->hd_tbl_status) in hclge_rm_vport_vlan_table()
10150 vport->vport_id, in hclge_rm_vport_vlan_table()
10154 list_del(&vlan->node); in hclge_rm_vport_vlan_table()
10164 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_vlan_table()
10166 mutex_lock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10168 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_rm_vport_all_vlan_table()
10169 if (vlan->hd_tbl_status) in hclge_rm_vport_all_vlan_table()
10172 vport->vport_id, in hclge_rm_vport_all_vlan_table()
10173 vlan->vlan_id, in hclge_rm_vport_all_vlan_table()
10176 vlan->hd_tbl_status = false; in hclge_rm_vport_all_vlan_table()
10178 list_del(&vlan->node); in hclge_rm_vport_all_vlan_table()
10182 clear_bit(vport->vport_id, hdev->vf_vlan_full); in hclge_rm_vport_all_vlan_table()
10183 mutex_unlock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10192 mutex_lock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10194 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_vlan_table()
10195 vport = &hdev->vport[i]; in hclge_uninit_vport_vlan_table()
10196 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_uninit_vport_vlan_table()
10197 list_del(&vlan->node); in hclge_uninit_vport_vlan_table()
10202 mutex_unlock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10216 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { in hclge_restore_vport_port_base_vlan_config()
10217 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; in hclge_restore_vport_port_base_vlan_config()
10218 vlan_info = vport->port_base_vlan_cfg.tbl_sta ? in hclge_restore_vport_port_base_vlan_config()
10219 &vport->port_base_vlan_cfg.vlan_info : in hclge_restore_vport_port_base_vlan_config()
10220 &vport->port_base_vlan_cfg.old_vlan_info; in hclge_restore_vport_port_base_vlan_config()
10222 vlan_id = vlan_info->vlan_tag; in hclge_restore_vport_port_base_vlan_config()
10223 vlan_proto = vlan_info->vlan_proto; in hclge_restore_vport_port_base_vlan_config()
10224 state = vport->port_base_vlan_cfg.state; in hclge_restore_vport_port_base_vlan_config()
10227 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); in hclge_restore_vport_port_base_vlan_config()
10229 vport->vport_id, in hclge_restore_vport_port_base_vlan_config()
10231 vport->port_base_vlan_cfg.tbl_sta = ret == 0; in hclge_restore_vport_port_base_vlan_config()
10239 struct hclge_dev *hdev = vport->back; in hclge_restore_vport_vlan_table()
10242 mutex_lock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10244 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_restore_vport_vlan_table()
10245 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_restore_vport_vlan_table()
10247 vport->vport_id, in hclge_restore_vport_vlan_table()
10248 vlan->vlan_id, false); in hclge_restore_vport_vlan_table()
10251 vlan->hd_tbl_status = true; in hclge_restore_vport_vlan_table()
10255 mutex_unlock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10269 if (mac_node->state == HCLGE_MAC_ACTIVE) { in hclge_mac_node_convert_for_reset()
10270 mac_node->state = HCLGE_MAC_TO_ADD; in hclge_mac_node_convert_for_reset()
10271 } else if (mac_node->state == HCLGE_MAC_TO_DEL) { in hclge_mac_node_convert_for_reset()
10272 list_del(&mac_node->node); in hclge_mac_node_convert_for_reset()
10280 spin_lock_bh(&vport->mac_list_lock); in hclge_restore_mac_table_common()
10282 hclge_mac_node_convert_for_reset(&vport->uc_mac_list); in hclge_restore_mac_table_common()
10283 hclge_mac_node_convert_for_reset(&vport->mc_mac_list); in hclge_restore_mac_table_common()
10284 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_restore_mac_table_common()
10286 spin_unlock_bh(&vport->mac_list_lock); in hclge_restore_mac_table_common()
10291 struct hclge_vport *vport = &hdev->vport[0]; in hclge_restore_hw_table()
10292 struct hnae3_handle *handle = &vport->nic; in hclge_restore_hw_table()
10297 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_restore_hw_table()
10305 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_en_hw_strip_rxvtag()
10306 vport->rxvlan_cfg.strip_tag1_en = false; in hclge_en_hw_strip_rxvtag()
10307 vport->rxvlan_cfg.strip_tag2_en = enable; in hclge_en_hw_strip_rxvtag()
10308 vport->rxvlan_cfg.strip_tag2_discard_en = false; in hclge_en_hw_strip_rxvtag()
10310 vport->rxvlan_cfg.strip_tag1_en = enable; in hclge_en_hw_strip_rxvtag()
10311 vport->rxvlan_cfg.strip_tag2_en = true; in hclge_en_hw_strip_rxvtag()
10312 vport->rxvlan_cfg.strip_tag2_discard_en = true; in hclge_en_hw_strip_rxvtag()
10315 vport->rxvlan_cfg.strip_tag1_discard_en = false; in hclge_en_hw_strip_rxvtag()
10316 vport->rxvlan_cfg.vlan1_vlan_prionly = false; in hclge_en_hw_strip_rxvtag()
10317 vport->rxvlan_cfg.vlan2_vlan_prionly = false; in hclge_en_hw_strip_rxvtag()
10318 vport->rxvlan_cfg.rx_vlan_offload_en = enable; in hclge_en_hw_strip_rxvtag()
10325 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_fltr_change()
10327 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_set_vport_vlan_fltr_change()
10328 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); in hclge_set_vport_vlan_fltr_change()
10336 struct hclge_dev *hdev = vport->back; in hclge_update_vlan_filter_entries()
10342 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); in hclge_update_vlan_filter_entries()
10346 htons(new_info->vlan_proto), in hclge_update_vlan_filter_entries()
10347 vport->vport_id, in hclge_update_vlan_filter_entries()
10348 new_info->vlan_tag, in hclge_update_vlan_filter_entries()
10352 vport->port_base_vlan_cfg.tbl_sta = false; in hclge_update_vlan_filter_entries()
10355 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); in hclge_update_vlan_filter_entries()
10359 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), in hclge_update_vlan_filter_entries()
10360 vport->vport_id, old_info->vlan_tag, in hclge_update_vlan_filter_entries()
10371 if (new_cfg->vlan_tag != old_cfg->vlan_tag) in hclge_need_update_vlan_filter()
10374 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) in hclge_need_update_vlan_filter()
10384 struct hclge_dev *hdev = vport->back; in hclge_modify_port_base_vlan_tag()
10388 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), in hclge_modify_port_base_vlan_tag()
10389 vport->vport_id, new_info->vlan_tag, in hclge_modify_port_base_vlan_tag()
10394 vport->port_base_vlan_cfg.tbl_sta = false; in hclge_modify_port_base_vlan_tag()
10396 if (old_info->vlan_tag == 0) in hclge_modify_port_base_vlan_tag()
10397 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, in hclge_modify_port_base_vlan_tag()
10401 vport->vport_id, in hclge_modify_port_base_vlan_tag()
10402 old_info->vlan_tag, true); in hclge_modify_port_base_vlan_tag()
10404 dev_err(&hdev->pdev->dev, in hclge_modify_port_base_vlan_tag()
10406 vport->vport_id, old_info->vlan_tag, ret); in hclge_modify_port_base_vlan_tag()
10414 struct hnae3_handle *nic = &vport->nic; in hclge_update_port_base_vlan_cfg()
10418 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; in hclge_update_port_base_vlan_cfg()
10420 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, in hclge_update_port_base_vlan_cfg()
10421 vlan_info->qos); in hclge_update_port_base_vlan_cfg()
10438 vport->port_base_vlan_cfg.state = state; in hclge_update_port_base_vlan_cfg()
10440 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; in hclge_update_port_base_vlan_cfg()
10442 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; in hclge_update_port_base_vlan_cfg()
10444 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; in hclge_update_port_base_vlan_cfg()
10445 vport->port_base_vlan_cfg.vlan_info = *vlan_info; in hclge_update_port_base_vlan_cfg()
10446 vport->port_base_vlan_cfg.tbl_sta = true; in hclge_update_port_base_vlan_cfg()
10466 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && in hclge_get_port_base_vlan_state()
10467 vport->port_base_vlan_cfg.vlan_info.qos == qos) in hclge_get_port_base_vlan_state()
10476 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_set_vf_vlan_filter()
10478 struct hclge_dev *hdev = vport->back; in hclge_set_vf_vlan_filter()
10483 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_vlan_filter()
10484 return -EOPNOTSUPP; in hclge_set_vf_vlan_filter()
10488 return -EINVAL; in hclge_set_vf_vlan_filter()
10490 /* qos is a 3-bit value, so it cannot be bigger than 7 */ in hclge_set_vf_vlan_filter()
10491 if (vlan > VLAN_N_VID - 1 || qos > 7) in hclge_set_vf_vlan_filter()
10492 return -EINVAL; in hclge_set_vf_vlan_filter()
10494 return -EPROTONOSUPPORT; in hclge_set_vf_vlan_filter()
10497 vport->port_base_vlan_cfg.state, in hclge_set_vf_vlan_filter()
10508 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter()
10520 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_set_vf_vlan_filter()
10521 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_set_vf_vlan_filter()
10522 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], in hclge_set_vf_vlan_filter()
10523 vport->vport_id, in hclge_set_vf_vlan_filter()
10528 &vport->need_notify); in hclge_set_vf_vlan_filter()
10541 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_clear_vf_vlan()
10542 vport = &hdev->vport[vf]; in hclge_clear_vf_vlan()
10543 vlan_info = &vport->port_base_vlan_cfg.vlan_info; in hclge_clear_vf_vlan()
10546 vport->vport_id, in hclge_clear_vf_vlan()
10547 vlan_info->vlan_tag, true); in hclge_clear_vf_vlan()
10549 dev_err(&hdev->pdev->dev, in hclge_clear_vf_vlan()
10551 vf - HCLGE_VF_VPORT_START_NUM, ret); in hclge_clear_vf_vlan()
10559 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_filter()
10567 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10568 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_set_vlan_filter()
10569 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { in hclge_set_vlan_filter()
10570 set_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
10571 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10572 return -EBUSY; in hclge_set_vlan_filter()
10573 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { in hclge_set_vlan_filter()
10574 clear_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
10576 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10584 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_set_vlan_filter()
10585 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, in hclge_set_vlan_filter()
10595 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10597 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10604 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10605 set_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
10606 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10620 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_fltr_state()
10621 vport = &hdev->vport[i]; in hclge_sync_vlan_fltr_state()
10623 &vport->state)) in hclge_sync_vlan_fltr_state()
10627 vport->req_vlan_fltr_en); in hclge_sync_vlan_fltr_state()
10629 dev_err(&hdev->pdev->dev, in hclge_sync_vlan_fltr_state()
10631 vport->vport_id, ret); in hclge_sync_vlan_fltr_state()
10633 &vport->state); in hclge_sync_vlan_fltr_state()
10646 mutex_lock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10648 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_filter()
10649 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_vlan_filter()
10651 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, in hclge_sync_vlan_filter()
10655 vport->vport_id, vlan_id, in hclge_sync_vlan_filter()
10657 if (ret && ret != -EINVAL) { in hclge_sync_vlan_filter()
10658 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10662 clear_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_sync_vlan_filter()
10668 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10672 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, in hclge_sync_vlan_filter()
10676 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10689 req->max_frm_size = cpu_to_le16(new_mps); in hclge_set_mac_mtu()
10690 req->min_frm_size = HCLGE_MAC_MIN_FRAME; in hclge_set_mac_mtu()
10692 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_mac_mtu()
10704 struct hclge_dev *hdev = vport->back; in hclge_set_vport_mtu()
10710 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) in hclge_set_vport_mtu()
10711 return -EINVAL; in hclge_set_vport_mtu()
10714 mutex_lock(&hdev->vport_lock); in hclge_set_vport_mtu()
10715 /* VF's mps must fit within hdev->mps */ in hclge_set_vport_mtu()
10716 if (vport->vport_id && max_frm_size > hdev->mps) { in hclge_set_vport_mtu()
10717 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10718 return -EINVAL; in hclge_set_vport_mtu()
10719 } else if (vport->vport_id) { in hclge_set_vport_mtu()
10720 vport->mps = max_frm_size; in hclge_set_vport_mtu()
10721 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10726 for (i = 1; i < hdev->num_alloc_vport; i++) in hclge_set_vport_mtu()
10727 if (max_frm_size < hdev->vport[i].mps) { in hclge_set_vport_mtu()
10728 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10730 i, hdev->vport[i].mps); in hclge_set_vport_mtu()
10731 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10732 return -EINVAL; in hclge_set_vport_mtu()
10739 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10744 hdev->mps = max_frm_size; in hclge_set_vport_mtu()
10745 vport->mps = max_frm_size; in hclge_set_vport_mtu()
10749 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10754 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
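/*
 * Editor's note: a minimal sketch of the MTU check performed by
 * hclge_set_vport_mtu() above: the requested MTU is converted to a
 * maximum frame size (MTU + L2 header + FCS + two VLAN tags) and must
 * fall inside the device limits; a VF's frame size must additionally fit
 * within the PF's current setting.  The numeric limits below are
 * assumptions for illustration, not the driver's dev_specs values.
 */
#include <stdio.h>

#define L2_HLEN		14	/* Ethernet header */
#define FCS_LEN		4	/* frame check sequence */
#define VLAN_HLEN	4	/* one VLAN tag */
#define MIN_FRAME	64	/* assumed minimum frame size */
#define MAX_FRAME	9728	/* assumed device maximum frame size */

static int check_vport_mtu(int mtu, int is_vf, int pf_max_frm_size)
{
	int max_frm_size = mtu + L2_HLEN + FCS_LEN + 2 * VLAN_HLEN;

	if (max_frm_size < MIN_FRAME || max_frm_size > MAX_FRAME)
		return -1;	/* models -EINVAL */

	if (is_vf && max_frm_size > pf_max_frm_size)
		return -1;	/* VF frame size must fit within the PF's */

	return max_frm_size;
}

int main(void)
{
	printf("mtu 1500 -> frame %d\n", check_vport_mtu(1500, 0, 0));
	printf("mtu 9600 (VF, PF at 1526) -> %d\n",
	       check_vport_mtu(9600, 1, 1526));
	return 0;
}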
10768 req->tqp_id = cpu_to_le16(queue_id); in hclge_reset_tqp_cmd_send()
10770 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); in hclge_reset_tqp_cmd_send()
10772 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_tqp_cmd_send()
10774 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd_send()
10792 req->tqp_id = cpu_to_le16(queue_id); in hclge_get_reset_status()
10794 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_reset_status()
10796 dev_err(&hdev->pdev->dev, in hclge_get_reset_status()
10801 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); in hclge_get_reset_status()
10811 queue = handle->kinfo.tqp[queue_id]; in hclge_covert_handle_qid_global()
10814 return tqp->index; in hclge_covert_handle_qid_global()
10820 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp_cmd()
10827 for (i = 0; i < handle->kinfo.num_tqps; i++) { in hclge_reset_tqp_cmd()
10831 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10851 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10853 return -ETIME; in hclge_reset_tqp_cmd()
10858 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
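/*
 * Editor's note: a standalone sketch of the retry loop used around
 * hclge_get_reset_status() above: the ready bit is polled with a short
 * delay between attempts, and the caller gives up with a timeout error
 * after a bounded number of tries.  The retry count and delay here are
 * illustrative, not the driver's constants.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RESET_TRY_TIMES	20
#define RESET_WAIT_US	20000	/* 20 ms between polls */

/* stand-in for reading the "ready to reset" bit from the firmware */
static bool read_reset_status(int queue_id)
{
	static int polls;

	(void)queue_id;
	return ++polls >= 3;	/* pretend the queue is ready on the 3rd poll */
}

static int wait_tqp_reset_ready(int queue_id)
{
	int try;

	for (try = 0; try < RESET_TRY_TIMES; try++) {
		if (read_reset_status(queue_id))
			return 0;
		usleep(RESET_WAIT_US);
	}
	return -1;	/* models -ETIME */
}

int main(void)
{
	printf("queue 0 ready: %s\n", wait_tqp_reset_ready(0) ? "no" : "yes");
	return 0;
}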
10874 struct hclge_dev *hdev = vport->back; in hclge_reset_rcb()
10885 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); in hclge_reset_rcb()
10886 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); in hclge_reset_rcb()
10887 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); in hclge_reset_rcb()
10889 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_rcb()
10891 dev_err(&hdev->pdev->dev, in hclge_reset_rcb()
10896 return_status = req->fun_reset_rcb_return_status; in hclge_reset_rcb()
10901 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", in hclge_reset_rcb()
10903 return -EIO; in hclge_reset_rcb()
10915 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp()
10919 if (!vport->vport_id) { in hclge_reset_tqp()
10922 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
10934 struct hclge_dev *hdev = vport->back; in hclge_get_fw_version()
10936 return hdev->fw_version; in hclge_get_fw_version()
10948 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_scc_version()
10952 *scc_version = le32_to_cpu(resp->scc_version); in hclge_query_scc_version()
10959 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_flowctrl_adv()
10971 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) in hclge_cfg_pauseparam()
10976 dev_err(&hdev->pdev->dev, in hclge_cfg_pauseparam()
10984 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_cfg_flowctrl()
10990 if (!phydev->link) in hclge_cfg_flowctrl()
10993 if (!phydev->autoneg) in hclge_cfg_flowctrl()
10996 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); in hclge_cfg_flowctrl()
10998 if (phydev->pause) in hclge_cfg_flowctrl()
11001 if (phydev->asym_pause) in hclge_cfg_flowctrl()
11009 if (phydev->duplex == HCLGE_MAC_HALF) { in hclge_cfg_flowctrl()
11021 struct hclge_dev *hdev = vport->back; in hclge_get_pauseparam()
11022 u8 media_type = hdev->hw.mac.media_type; in hclge_get_pauseparam()
11027 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_get_pauseparam()
11033 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { in hclge_get_pauseparam()
11036 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { in hclge_get_pauseparam()
11039 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { in hclge_get_pauseparam()
11052 hdev->fc_mode_last_time = HCLGE_FC_FULL; in hclge_record_user_pauseparam()
11054 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; in hclge_record_user_pauseparam()
11056 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; in hclge_record_user_pauseparam()
11058 hdev->fc_mode_last_time = HCLGE_FC_NONE; in hclge_record_user_pauseparam()
11060 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; in hclge_record_user_pauseparam()
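/*
 * Editor's note: a small sketch of the rx/tx pause to flow-control mode
 * mapping recorded by hclge_record_user_pauseparam() above.  The enum is
 * local to the example; only the mapping itself mirrors the driver.
 */
#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode pause_to_fc_mode(bool rx_en, bool tx_en)
{
	if (rx_en && tx_en)
		return FC_FULL;
	if (rx_en)
		return FC_RX_PAUSE;
	if (tx_en)
		return FC_TX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	printf("rx only -> %d, both -> %d\n",
	       pause_to_fc_mode(true, false), pause_to_fc_mode(true, true));
	return 0;
}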
11067 struct hclge_dev *hdev = vport->back; in hclge_set_pauseparam()
11068 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_pauseparam()
11074 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
11075 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); in hclge_set_pauseparam()
11076 return -EOPNOTSUPP; in hclge_set_pauseparam()
11080 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_set_pauseparam()
11081 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
11083 return -EOPNOTSUPP; in hclge_set_pauseparam()
11096 return -EOPNOTSUPP; in hclge_set_pauseparam()
11103 struct hclge_dev *hdev = vport->back; in hclge_get_ksettings_an_result()
11106 *speed = hdev->hw.mac.speed; in hclge_get_ksettings_an_result()
11108 *duplex = hdev->hw.mac.duplex; in hclge_get_ksettings_an_result()
11110 *auto_neg = hdev->hw.mac.autoneg; in hclge_get_ksettings_an_result()
11112 *lane_num = hdev->hw.mac.lane_num; in hclge_get_ksettings_an_result()
11119 struct hclge_dev *hdev = vport->back; in hclge_get_media_type()
11128 *media_type = hdev->hw.mac.media_type; in hclge_get_media_type()
11131 *module_type = hdev->hw.mac.module_type; in hclge_get_media_type()
11138 struct hclge_dev *hdev = vport->back; in hclge_get_mdix_mode()
11139 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mdix_mode()
11186 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_info_show()
11187 struct device *dev = &hdev->pdev->dev; in hclge_info_show()
11191 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); in hclge_info_show()
11192 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); in hclge_info_show()
11193 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); in hclge_info_show()
11194 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); in hclge_info_show()
11195 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); in hclge_info_show()
11196 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); in hclge_info_show()
11197 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); in hclge_info_show()
11198 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); in hclge_info_show()
11199 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); in hclge_info_show()
11201 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); in hclge_info_show()
11203 str_enable_disable(handle->kinfo.tc_info.dcb_ets_active)); in hclge_info_show()
11205 str_enable_disable(handle->kinfo.tc_info.mqprio_active)); in hclge_info_show()
11207 hdev->tx_spare_buf_size); in hclge_info_show()
11215 struct hnae3_client *client = vport->nic.client; in hclge_init_nic_client_instance()
11216 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_nic_client_instance()
11217 int rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_nic_client_instance()
11220 ret = client->ops->init_instance(&vport->nic); in hclge_init_nic_client_instance()
11224 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11225 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_nic_client_instance()
11226 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_nic_client_instance()
11227 ret = -EBUSY; in hclge_init_nic_client_instance()
11234 dev_err(&ae_dev->pdev->dev, in hclge_init_nic_client_instance()
11241 if (netif_msg_drv(&hdev->vport->nic)) in hclge_init_nic_client_instance()
11247 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11248 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_nic_client_instance()
11251 client->ops->uninit_instance(&vport->nic, 0); in hclge_init_nic_client_instance()
11259 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_roce_client_instance()
11264 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclge_init_roce_client_instance()
11265 !hdev->nic_client) in hclge_init_roce_client_instance()
11268 client = hdev->roce_client; in hclge_init_roce_client_instance()
11273 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_roce_client_instance()
11274 ret = client->ops->init_instance(&vport->roce); in hclge_init_roce_client_instance()
11278 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11279 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_roce_client_instance()
11280 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_roce_client_instance()
11281 ret = -EBUSY; in hclge_init_roce_client_instance()
11288 dev_err(&ae_dev->pdev->dev, in hclge_init_roce_client_instance()
11298 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11299 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_roce_client_instance()
11302 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_init_roce_client_instance()
11310 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_client_instance()
11311 struct hclge_vport *vport = &hdev->vport[0]; in hclge_init_client_instance()
11314 switch (client->type) { in hclge_init_client_instance()
11316 hdev->nic_client = client; in hclge_init_client_instance()
11317 vport->nic.client = client; in hclge_init_client_instance()
11329 hdev->roce_client = client; in hclge_init_client_instance()
11330 vport->roce.client = client; in hclge_init_client_instance()
11339 return -EINVAL; in hclge_init_client_instance()
11345 hdev->nic_client = NULL; in hclge_init_client_instance()
11346 vport->nic.client = NULL; in hclge_init_client_instance()
11349 hdev->roce_client = NULL; in hclge_init_client_instance()
11350 vport->roce.client = NULL; in hclge_init_client_instance()
11356 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_uninit_need_wait()
11357 test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_uninit_need_wait()
11363 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_client_instance()
11364 struct hclge_vport *vport = &hdev->vport[0]; in hclge_uninit_client_instance()
11366 if (hdev->roce_client) { in hclge_uninit_client_instance()
11367 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11371 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_uninit_client_instance()
11372 hdev->roce_client = NULL; in hclge_uninit_client_instance()
11373 vport->roce.client = NULL; in hclge_uninit_client_instance()
11375 if (client->type == HNAE3_CLIENT_ROCE) in hclge_uninit_client_instance()
11377 if (hdev->nic_client && client->ops->uninit_instance) { in hclge_uninit_client_instance()
11378 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11379 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
11382 client->ops->uninit_instance(&vport->nic, 0); in hclge_uninit_client_instance()
11383 hdev->nic_client = NULL; in hclge_uninit_client_instance()
11384 vport->nic.client = NULL; in hclge_uninit_client_instance()
11390 struct pci_dev *pdev = hdev->pdev; in hclge_dev_mem_map()
11391 struct hclge_hw *hw = &hdev->hw; in hclge_dev_mem_map()
11397 hw->hw.mem_base = in hclge_dev_mem_map()
11398 devm_ioremap_wc(&pdev->dev, in hclge_dev_mem_map()
11401 if (!hw->hw.mem_base) { in hclge_dev_mem_map()
11402 dev_err(&pdev->dev, "failed to map device memory\n"); in hclge_dev_mem_map()
11403 return -EFAULT; in hclge_dev_mem_map()
11411 struct pci_dev *pdev = hdev->pdev; in hclge_pci_init()
11417 dev_err(&pdev->dev, "failed to enable PCI device\n"); in hclge_pci_init()
11421 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in hclge_pci_init()
11423 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in hclge_pci_init()
11425 dev_err(&pdev->dev, in hclge_pci_init()
11429 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); in hclge_pci_init()
11434 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); in hclge_pci_init()
11439 hw = &hdev->hw; in hclge_pci_init()
11440 hw->hw.io_base = pcim_iomap(pdev, 2, 0); in hclge_pci_init()
11441 if (!hw->hw.io_base) { in hclge_pci_init()
11442 dev_err(&pdev->dev, "Can't map configuration register space\n"); in hclge_pci_init()
11443 ret = -ENOMEM; in hclge_pci_init()
11451 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); in hclge_pci_init()
11456 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_init()
11467 struct pci_dev *pdev = hdev->pdev; in hclge_pci_uninit()
11469 if (hdev->hw.hw.mem_base) in hclge_pci_uninit()
11470 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); in hclge_pci_uninit()
11472 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_uninit()
11480 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); in hclge_state_init()
11481 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_init()
11482 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11483 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_state_init()
11484 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_state_init()
11485 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11486 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_state_init()
11491 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_uninit()
11492 set_bit(HCLGE_STATE_REMOVING, &hdev->state); in hclge_state_uninit()
11494 if (hdev->reset_timer.function) in hclge_state_uninit()
11495 timer_delete_sync(&hdev->reset_timer); in hclge_state_uninit()
11496 if (hdev->service_task.work.func) in hclge_state_uninit()
11497 cancel_delayed_work_sync(&hdev->service_task); in hclge_state_uninit()
11506 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_prepare_general()
11511 down(&hdev->reset_sem); in hclge_reset_prepare_general()
11512 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11513 hdev->reset_type = rst_type; in hclge_reset_prepare_general()
11515 if (!ret && !hdev->reset_pending) in hclge_reset_prepare_general()
11518 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_general()
11520 ret, hdev->reset_pending, retry_cnt); in hclge_reset_prepare_general()
11521 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11522 up(&hdev->reset_sem); in hclge_reset_prepare_general()
11527 hclge_enable_vector(&hdev->misc_vector, false); in hclge_reset_prepare_general()
11528 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_general()
11530 if (hdev->reset_type == HNAE3_FLR_RESET) in hclge_reset_prepare_general()
11531 hdev->rst_stats.flr_rst_cnt++; in hclge_reset_prepare_general()
11536 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_done()
11539 hclge_enable_vector(&hdev->misc_vector, true); in hclge_reset_done()
11543 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); in hclge_reset_done()
11545 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_done()
11546 if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_reset_done()
11547 up(&hdev->reset_sem); in hclge_reset_done()
11554 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_clear_resetting_state()
11555 struct hclge_vport *vport = &hdev->vport[i]; in hclge_clear_resetting_state()
11559 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); in hclge_clear_resetting_state()
11561 dev_warn(&hdev->pdev->dev, in hclge_clear_resetting_state()
11563 vport->vport_id, ret); in hclge_clear_resetting_state()
11574 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_clear_hw_resource()
11576 * fail with older firmware. Error value -EOPNOTSUPP can only be in hclge_clear_hw_resource()
11581 if (ret && ret != -EOPNOTSUPP) { in hclge_clear_hw_resource()
11582 dev_err(&hdev->pdev->dev, in hclge_clear_hw_resource()
11591 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_init_rxd_adv_layout()
11592 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); in hclge_init_rxd_adv_layout()
11597 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_uninit_rxd_adv_layout()
11598 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); in hclge_uninit_rxd_adv_layout()
11605 return &vport->back->hw.mac.wol; in hclge_get_wol_info()
11619 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_wol_supported_mode()
11621 dev_err(&hdev->pdev->dev, in hclge_get_wol_supported_mode()
11626 *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode); in hclge_get_wol_supported_mode()
11640 wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode); in hclge_set_wol_cfg()
11641 wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size; in hclge_set_wol_cfg()
11642 memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX); in hclge_set_wol_cfg()
11644 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_wol_cfg()
11646 dev_err(&hdev->pdev->dev, in hclge_set_wol_cfg()
11654 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_update_wol()
11656 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_update_wol()
11664 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_init_wol()
11667 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_init_wol()
11672 &wol_info->wol_support_mode); in hclge_init_wol()
11674 wol_info->wol_support_mode = 0; in hclge_init_wol()
11686 wol->supported = wol_info->wol_support_mode; in hclge_get_wol()
11687 wol->wolopts = wol_info->wol_current_mode; in hclge_get_wol()
11688 if (wol_info->wol_current_mode & WAKE_MAGICSECURE) in hclge_get_wol()
11689 memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX); in hclge_get_wol()
11700 wol_mode = wol->wolopts; in hclge_set_wol()
11701 if (wol_mode & ~wol_info->wol_support_mode) in hclge_set_wol()
11702 return -EINVAL; in hclge_set_wol()
11704 wol_info->wol_current_mode = wol_mode; in hclge_set_wol()
11706 memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX); in hclge_set_wol()
11707 wol_info->wol_sopass_size = SOPASS_MAX; in hclge_set_wol()
11709 wol_info->wol_sopass_size = 0; in hclge_set_wol()
11712 ret = hclge_set_wol_cfg(vport->back, wol_info); in hclge_set_wol()
11714 wol_info->wol_current_mode = 0; in hclge_set_wol()
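/*
 * Editor's note: a standalone sketch of the Wake-on-LAN validation done
 * in hclge_set_wol() above: the requested mode mask must be a subset of
 * the modes the firmware reported as supported, and the SecureOn
 * password is only kept when a magic-secure wake mode is requested.  The
 * bit values below are illustrative, not the ethtool WAKE_* definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WAKE_MAGIC_BIT		0x01	/* assumed stand-in for WAKE_MAGIC */
#define WAKE_MAGICSECURE_BIT	0x02	/* assumed stand-in for WAKE_MAGICSECURE */
#define SOPASS_LEN		6

struct wol_state {
	uint32_t supported;
	uint32_t current_mode;
	uint8_t sopass[SOPASS_LEN];
	uint8_t sopass_size;
};

static int set_wol(struct wol_state *w, uint32_t requested,
		   const uint8_t *sopass)
{
	if (requested & ~w->supported)
		return -1;	/* models -EINVAL: unsupported mode requested */

	w->current_mode = requested;
	if (requested & WAKE_MAGICSECURE_BIT) {
		memcpy(w->sopass, sopass, SOPASS_LEN);
		w->sopass_size = SOPASS_LEN;
	} else {
		w->sopass_size = 0;
	}
	return 0;
}

int main(void)
{
	struct wol_state w = { .supported = WAKE_MAGIC_BIT };
	uint8_t pass[SOPASS_LEN] = { 0 };

	printf("magic: %d, magicsecure: %d\n",
	       set_wol(&w, WAKE_MAGIC_BIT, pass),
	       set_wol(&w, WAKE_MAGICSECURE_BIT, pass));
	return 0;
}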
11721 struct pci_dev *pdev = ae_dev->pdev; in hclge_init_ae_dev()
11725 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclge_init_ae_dev()
11727 return -ENOMEM; in hclge_init_ae_dev()
11729 hdev->pdev = pdev; in hclge_init_ae_dev()
11730 hdev->ae_dev = ae_dev; in hclge_init_ae_dev()
11731 hdev->reset_type = HNAE3_NONE_RESET; in hclge_init_ae_dev()
11732 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_init_ae_dev()
11733 ae_dev->priv = hdev; in hclge_init_ae_dev()
11736 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; in hclge_init_ae_dev()
11738 mutex_init(&hdev->vport_lock); in hclge_init_ae_dev()
11739 spin_lock_init(&hdev->fd_rule_lock); in hclge_init_ae_dev()
11740 sema_init(&hdev->reset_sem, 1); in hclge_init_ae_dev()
11747 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); in hclge_init_ae_dev()
11752 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops); in hclge_init_ae_dev()
11753 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_init_ae_dev()
11754 true, hdev->reset_pending); in hclge_init_ae_dev()
11768 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n", in hclge_init_ae_dev()
11775 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); in hclge_init_ae_dev()
11781 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); in hclge_init_ae_dev()
11791 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); in hclge_init_ae_dev()
11803 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { in hclge_init_ae_dev()
11804 clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); in hclge_init_ae_dev()
11820 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); in hclge_init_ae_dev()
11826 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); in hclge_init_ae_dev()
11836 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11842 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11846 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, in hclge_init_ae_dev()
11847 &hdev->rss_cfg); in hclge_init_ae_dev()
11849 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); in hclge_init_ae_dev()
11855 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11861 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11867 dev_err(&pdev->dev, in hclge_init_ae_dev()
11880 INIT_KFIFO(hdev->mac_tnl_log); in hclge_init_ae_dev()
11884 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); in hclge_init_ae_dev()
11885 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); in hclge_init_ae_dev()
11899 if (ae_dev->hw_err_reset_req) { in hclge_init_ae_dev()
11903 &ae_dev->hw_err_reset_req); in hclge_init_ae_dev()
11905 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_init_ae_dev()
11912 dev_warn(&pdev->dev, in hclge_init_ae_dev()
11920 hdev->last_reset_time = jiffies; in hclge_init_ae_dev()
11923 enable_irq(hdev->misc_vector.vector_irq); in hclge_init_ae_dev()
11924 hclge_enable_vector(&hdev->misc_vector, true); in hclge_init_ae_dev()
11926 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", in hclge_init_ae_dev()
11935 if (hdev->hw.mac.phydev) in hclge_init_ae_dev()
11936 mdiobus_unregister(hdev->hw.mac.mdio_bus); in hclge_init_ae_dev()
11942 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_init_ae_dev()
11944 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_init_ae_dev()
11948 mutex_destroy(&hdev->vport_lock); in hclge_init_ae_dev()
11954 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); in hclge_stats_clear()
11955 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); in hclge_stats_clear()
11977 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11985 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11996 struct hclge_dev *hdev = vport->back; in hclge_set_vf_spoofchk()
12000 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_spoofchk()
12001 return -EOPNOTSUPP; in hclge_set_vf_spoofchk()
12005 return -EINVAL; in hclge_set_vf_spoofchk()
12007 if (vport->vf_info.spoofchk == new_spoofchk) in hclge_set_vf_spoofchk()
12010 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) in hclge_set_vf_spoofchk()
12011 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
12015 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
12019 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); in hclge_set_vf_spoofchk()
12023 vport->vf_info.spoofchk = new_spoofchk; in hclge_set_vf_spoofchk()
12029 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_spoofchk()
12033 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_reset_vport_spoofchk()
12037 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_spoofchk()
12038 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, in hclge_reset_vport_spoofchk()
12039 vport->vf_info.spoofchk); in hclge_reset_vport_spoofchk()
12052 struct hclge_dev *hdev = vport->back; in hclge_set_vf_trust()
12057 return -EINVAL; in hclge_set_vf_trust()
12059 if (vport->vf_info.trusted == new_trusted) in hclge_set_vf_trust()
12062 vport->vf_info.trusted = new_trusted; in hclge_set_vf_trust()
12063 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_set_vf_trust()
12075 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_reset_vf_rate()
12076 struct hclge_vport *vport = &hdev->vport[vf]; in hclge_reset_vf_rate()
12078 vport->vf_info.max_tx_rate = 0; in hclge_reset_vf_rate()
12079 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); in hclge_reset_vf_rate()
12081 dev_err(&hdev->pdev->dev, in hclge_reset_vf_rate()
12083 vf - HCLGE_VF_VPORT_START_NUM, ret); in hclge_reset_vf_rate()
12091 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { in hclge_vf_rate_param_check()
12092 dev_err(&hdev->pdev->dev, in hclge_vf_rate_param_check()
12094 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); in hclge_vf_rate_param_check()
12095 return -EINVAL; in hclge_vf_rate_param_check()
12105 struct hclge_dev *hdev = vport->back; in hclge_set_vf_rate()
12114 return -EINVAL; in hclge_set_vf_rate()
12116 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) in hclge_set_vf_rate()
12123 vport->vf_info.max_tx_rate = max_tx_rate; in hclge_set_vf_rate()
12130 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_resume_vf_rate()
12136 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { in hclge_resume_vf_rate()
12139 return -EINVAL; in hclge_resume_vf_rate()
12144 if (!vport->vf_info.max_tx_rate) in hclge_resume_vf_rate()
12148 vport->vf_info.max_tx_rate, true); in hclge_resume_vf_rate()
12150 dev_err(&hdev->pdev->dev, in hclge_resume_vf_rate()
12152 vf, vport->vf_info.max_tx_rate, ret); in hclge_resume_vf_rate()
12162 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_state()
12165 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_state()
12166 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_reset_vport_state()
12173 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_ae_dev()
12174 struct pci_dev *pdev = ae_dev->pdev; in hclge_reset_ae_dev()
12177 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_reset_ae_dev()
12183 if (hdev->reset_type == HNAE3_IMP_RESET || in hclge_reset_ae_dev()
12184 hdev->reset_type == HNAE3_GLOBAL_RESET) { in hclge_reset_ae_dev()
12185 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); in hclge_reset_ae_dev()
12186 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); in hclge_reset_ae_dev()
12187 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); in hclge_reset_ae_dev()
12191 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_reset_ae_dev()
12192 true, hdev->reset_pending); in hclge_reset_ae_dev()
12194 dev_err(&pdev->dev, "Cmd queue init failed\n"); in hclge_reset_ae_dev()
12200 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); in hclge_reset_ae_dev()
12206 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); in hclge_reset_ae_dev()
12212 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", in hclge_reset_ae_dev()
12219 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12229 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12237 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12243 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12249 dev_err(&pdev->dev, in hclge_reset_ae_dev()
12256 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); in hclge_reset_ae_dev()
12270 /* Re-enable the hw error interrupts because in hclge_reset_ae_dev()
12275 dev_err(&pdev->dev, in hclge_reset_ae_dev()
12276 "fail(%d) to re-enable NIC hw error interrupts\n", in hclge_reset_ae_dev()
12281 if (hdev->roce_client) { in hclge_reset_ae_dev()
12284 dev_err(&pdev->dev, in hclge_reset_ae_dev()
12285 "fail(%d) to re-enable roce ras interrupts\n", in hclge_reset_ae_dev()
12304 dev_warn(&pdev->dev, in hclge_reset_ae_dev()
12307 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", in hclge_reset_ae_dev()
12315 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_ae_dev()
12316 struct hclge_mac *mac = &hdev->hw.mac; in hclge_uninit_ae_dev()
12326 if (mac->phydev) in hclge_uninit_ae_dev()
12327 mdiobus_unregister(mac->mdio_bus); in hclge_uninit_ae_dev()
12330 hclge_enable_vector(&hdev->misc_vector, false); in hclge_uninit_ae_dev()
12331 disable_irq(hdev->misc_vector.vector_irq); in hclge_uninit_ae_dev()
12338 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_uninit_ae_dev()
12343 mutex_destroy(&hdev->vport_lock); in hclge_uninit_ae_dev()
12344 ae_dev->priv = NULL; in hclge_uninit_ae_dev()
12350 struct hclge_dev *hdev = vport->back; in hclge_get_max_channels()
12352 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); in hclge_get_max_channels()
12358 ch->max_combined = hclge_get_max_channels(handle); in hclge_get_channels()
12359 ch->other_count = 1; in hclge_get_channels()
12360 ch->max_other = 1; in hclge_get_channels()
12361 ch->combined_count = handle->kinfo.rss_size; in hclge_get_channels()
12368 struct hclge_dev *hdev = vport->back; in hclge_get_tqps_and_rss_info()
12370 *alloc_tqps = vport->alloc_tqps; in hclge_get_tqps_and_rss_info()
12371 *max_rss_size = hdev->pf_rss_size_max; in hclge_get_tqps_and_rss_info()
12378 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tc_mode_cfg()
12384 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); in hclge_set_rss_tc_mode_cfg()
12390 if (!(hdev->hw_tc_map & BIT(i))) in hclge_set_rss_tc_mode_cfg()
12395 tc_offset[i] = vport->nic.kinfo.rss_size * i; in hclge_set_rss_tc_mode_cfg()
12398 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_set_rss_tc_mode_cfg()
12405 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_set_channels()
12407 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_set_channels()
12408 struct hclge_dev *hdev = vport->back; in hclge_set_channels()
12409 u16 cur_rss_size = kinfo->rss_size; in hclge_set_channels()
12410 u16 cur_tqps = kinfo->num_tqps; in hclge_set_channels()
12415 kinfo->req_rss_size = new_tqps_num; in hclge_set_channels()
12419 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); in hclge_set_channels()
12432 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), in hclge_set_channels()
12435 return -ENOMEM; in hclge_set_channels()
12437 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) in hclge_set_channels()
12438 rss_indir[i] = i % kinfo->rss_size; in hclge_set_channels()
12442 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclge_set_channels()
12449 dev_info(&hdev->pdev->dev, in hclge_set_channels()
12451 cur_rss_size, kinfo->rss_size, in hclge_set_channels()
12452 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); in hclge_set_channels()
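/*
 * Editor's note: a minimal sketch of the round-robin refill of the RSS
 * indirection table done after a queue-count change in
 * hclge_set_channels() above: every entry simply maps to
 * (index % rss_size).  The table size used here is an assumption for
 * illustration, not the device's rss_ind_tbl_size.
 */
#include <stdio.h>
#include <stdlib.h>

#define RSS_IND_TBL_SIZE 512	/* assumed indirection table size */

static unsigned int *build_rss_indir(unsigned int rss_size)
{
	unsigned int *tbl = calloc(RSS_IND_TBL_SIZE, sizeof(*tbl));
	unsigned int i;

	if (!tbl)
		return NULL;
	for (i = 0; i < RSS_IND_TBL_SIZE; i++)
		tbl[i] = i % rss_size;	/* spread entries evenly over queues */
	return tbl;
}

int main(void)
{
	unsigned int *tbl = build_rss_indir(8);

	if (tbl)
		printf("tbl[0]=%u tbl[9]=%u\n", tbl[0], tbl[9]);
	free(tbl);
	return 0;
}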
12466 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, in hclge_set_led_status()
12469 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_led_status()
12471 dev_err(&hdev->pdev->dev, in hclge_set_led_status()
12487 struct hclge_dev *hdev = vport->back; in hclge_set_led_id()
12495 return -EINVAL; in hclge_set_led_id()
12505 struct hclge_dev *hdev = vport->back; in hclge_get_link_mode()
12509 supported[idx] = hdev->hw.mac.supported[idx]; in hclge_get_link_mode()
12510 advertising[idx] = hdev->hw.mac.advertising[idx]; in hclge_get_link_mode()
12517 struct hclge_dev *hdev = vport->back; in hclge_gro_en()
12518 bool gro_en_old = hdev->gro_en; in hclge_gro_en()
12521 hdev->gro_en = enable; in hclge_gro_en()
12524 hdev->gro_en = gro_en_old; in hclge_gro_en()
12531 struct hnae3_handle *handle = &vport->nic; in hclge_sync_vport_promisc_mode()
12532 struct hclge_dev *hdev = vport->back; in hclge_sync_vport_promisc_mode()
12539 if (vport->last_promisc_flags != vport->overflow_promisc_flags) { in hclge_sync_vport_promisc_mode()
12540 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_sync_vport_promisc_mode()
12541 vport->last_promisc_flags = vport->overflow_promisc_flags; in hclge_sync_vport_promisc_mode()
12545 &vport->state)) in hclge_sync_vport_promisc_mode()
12549 if (!vport->vport_id) { in hclge_sync_vport_promisc_mode()
12550 tmp_flags = handle->netdev_flags | vport->last_promisc_flags; in hclge_sync_vport_promisc_mode()
12555 &vport->state); in hclge_sync_vport_promisc_mode()
12558 &vport->state); in hclge_sync_vport_promisc_mode()
12563 if (vport->vf_info.trusted) { in hclge_sync_vport_promisc_mode()
12564 uc_en = vport->vf_info.request_uc_en > 0 || in hclge_sync_vport_promisc_mode()
12565 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; in hclge_sync_vport_promisc_mode()
12566 mc_en = vport->vf_info.request_mc_en > 0 || in hclge_sync_vport_promisc_mode()
12567 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; in hclge_sync_vport_promisc_mode()
12569 bc_en = vport->vf_info.request_bc_en > 0; in hclge_sync_vport_promisc_mode()
12571 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, in hclge_sync_vport_promisc_mode()
12574 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_sync_vport_promisc_mode()
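/*
 * hclge_sync_promisc_mode(): walk every allocated vport and sync its
 * promiscuous configuration.
 */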
12588 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_promisc_mode()
12589 vport = &hdev->vport[i]; in hclge_sync_promisc_mode()
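/*
 * hclge_module_existed(): ask the firmware, via the command queue,
 * whether an SFP module is currently present in the port.
 */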
12604 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_module_existed()
12606 dev_err(&hdev->pdev->dev, in hclge_module_existed()
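/*
 * hclge_get_sfp_eeprom_info(): read a slice of the module EEPROM. The
 * request is spread across chained command descriptors; BD0 carries the
 * offset and requested length, and the returned data is copied out of
 * the descriptors piece by piece.
 */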
12635 if (i < HCLGE_SFP_INFO_CMD_NUM - 1) in hclge_get_sfp_eeprom_info()
12641 sfp_info_bd0->offset = cpu_to_le16((u16)offset); in hclge_get_sfp_eeprom_info()
12643 sfp_info_bd0->read_len = cpu_to_le16(read_len); in hclge_get_sfp_eeprom_info()
12645 ret = hclge_cmd_send(&hdev->hw, desc, i); in hclge_get_sfp_eeprom_info()
12647 dev_err(&hdev->pdev->dev, in hclge_get_sfp_eeprom_info()
12654 memcpy(data, sfp_info_bd0->data, copy_len); in hclge_get_sfp_eeprom_info()
12662 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); in hclge_get_sfp_eeprom_info()
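/*
 * hclge_get_module_eeprom(): ethtool module-EEPROM callback. Only fiber
 * ports are supported, -ENXIO is returned when no module is detected,
 * and the read is issued in chunks until the requested length is
 * satisfied (see the sketch after this function).
 */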
12674 struct hclge_dev *hdev = vport->back; in hclge_get_module_eeprom()
12678 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) in hclge_get_module_eeprom()
12679 return -EOPNOTSUPP; in hclge_get_module_eeprom()
12682 return -ENXIO; in hclge_get_module_eeprom()
12687 len - read_len, in hclge_get_module_eeprom()
12690 return -EIO; in hclge_get_module_eeprom()
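/*
 * Illustrative sketch only, not driver code: the chunked-read pattern
 * hclge_get_module_eeprom() uses above, where each pass asks for at most
 * one command's worth of data starting at offset + read_len. It builds as
 * a standalone userspace program; the chunk size and lengths are
 * assumptions, and read_chunk() stands in for the firmware command.
 */
#include <stdio.h>

#define EXAMPLE_MAX_CHUNK 96	/* assumed per-command read limit */

/* stand-in for the firmware read; reports how many bytes were fetched */
static unsigned int read_chunk(unsigned int offset, unsigned int want)
{
	printf("read [%u, %u)\n", offset, offset + want);
	return want;
}

int main(void)
{
	unsigned int offset = 128;	/* assumed EEPROM start offset */
	unsigned int len = 256;		/* assumed total bytes requested */
	unsigned int read_len = 0;

	while (read_len < len) {
		unsigned int want = len - read_len;

		if (want > EXAMPLE_MAX_CHUNK)
			want = EXAMPLE_MAX_CHUNK;

		read_len += read_chunk(offset + read_len, want);
	}

	printf("done: %u of %u bytes\n", read_len, len);
	return 0;
}

/*
 * hclge_get_link_diagnosis_info(): link-diagnosis query; only devices
 * newer than V2 support it, older hardware returns -EOPNOTSUPP.
 */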
12702 struct hclge_dev *hdev = vport->back; in hclge_get_link_diagnosis_info()
12706 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) in hclge_get_link_diagnosis_info()
12707 return -EOPNOTSUPP; in hclge_get_link_diagnosis_info()
12710 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_link_diagnosis_info()
12712 dev_err(&hdev->pdev->dev, in hclge_get_link_diagnosis_info()
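/*
 * hclge_clear_vport_vf_info(): drop the per-VF state held in its vport
 * when the VF goes away: clear the INITED/ALIVE bits, reset MPS, disable
 * the spoof-check filter in hardware (failures are only logged; the two
 * earlier cleanup calls fall outside this listing) and wipe the cached
 * vf_info.
 */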
12726 struct hclge_dev *hdev = vport->back; in hclge_clear_vport_vf_info()
12730 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); in hclge_clear_vport_vf_info()
12731 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_clear_vport_vf_info()
12732 vport->need_notify = 0; in hclge_clear_vport_vf_info()
12733 vport->mps = 0; in hclge_clear_vport_vf_info()
12738 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12749 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12753 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); in hclge_clear_vport_vf_info()
12755 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12759 memset(&vport->vf_info, 0, sizeof(vport->vf_info)); in hclge_clear_vport_vf_info()
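/*
 * hclge_clean_vport_config(): iterate over the VF vports, which start at
 * HCLGE_VF_VPORT_START_NUM, and clear each one's VF state.
 */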
12764 struct hclge_dev *hdev = ae_dev->priv; in hclge_clean_vport_config()
12769 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_clean_vport_config()
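/*
 * hclge_get_dscp_prio(): report the TC map mode and the priority mapped
 * to a DSCP value; an unmapped DSCP (HNAE3_PRIO_ID_INVALID) is reported
 * as priority 0, and an out-of-range DSCP is rejected with -EINVAL.
 */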
12781 return -EINVAL; in hclge_get_dscp_prio()
12784 *tc_mode = vport->nic.kinfo.tc_map_mode; in hclge_get_dscp_prio()
12786 *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : in hclge_get_dscp_prio()
12787 vport->nic.kinfo.dscp_prio[dscp]; in hclge_get_dscp_prio()
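/*
 * hclge_init(): module init. Only the -ENOMEM error path survives in this
 * listing; in the full source it is taken when the driver's workqueue
 * cannot be allocated.
 */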
12912 return -ENOMEM; in hclge_init()