Lines Matching +full:self +full:- +full:advertising
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
322 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
323 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
324 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
325 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
326 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
327 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
328 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
329 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
330 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
331 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
332 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
333 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
334 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
335 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
336 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
337 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
347 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
381 * hclge_cmd_send - send command to command queue
391 return hclge_comm_cmd_send(&hw->hw, desc, num); in hclge_cmd_send()
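The two hits above show hclge_cmd_send() as a thin wrapper around hclge_comm_cmd_send(). Almost every call site in this listing follows the same pattern: fill a descriptor, send it, and log the return code on failure. A minimal sketch of that pattern, assuming the driver's hclge_cmd_setup_basic_desc() helper and the HCLGE_OPC_QUERY_LINK_STATUS opcode from the hclge headers (the function below is illustrative only, not a verbatim part of this file):

    /* illustrative sketch of the common command pattern, not taken
     * verbatim from hclge_main.c
     */
    static int example_query_link_status(struct hclge_dev *hdev, u32 *status)
    {
            struct hclge_desc desc;
            int ret;

            /* third argument true marks the descriptor as a read (query) */
            hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
            ret = hclge_cmd_send(&hdev->hw, &desc, 1);
            if (ret) {
                    dev_err(&hdev->pdev->dev,
                            "query link status cmd failed %d\n", ret);
                    return ret;
            }

            *status = le32_to_cpu(desc.data[0]);
            return 0;
    }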
398 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_defective()
406 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); in hclge_mac_update_stats_defective()
408 dev_err(&hdev->pdev->dev, in hclge_mac_update_stats_defective()
414 /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */ in hclge_mac_update_stats_defective()
415 data_size = sizeof(desc) / (sizeof(u64)) - 1; in hclge_mac_update_stats_defective()
434 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; in hclge_mac_update_stats_complete()
435 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_complete()
443 /* The first desc has a 64-bit header, so it needs to be taken into account */ in hclge_mac_update_stats_complete()
451 return -ENOMEM; in hclge_mac_update_stats_complete()
454 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); in hclge_mac_update_stats_complete()
460 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); in hclge_mac_update_stats_complete()
487 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { in hclge_mac_query_reg_num()
493 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_mac_query_reg_num()
495 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
503 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
505 return -ENODATA; in hclge_mac_query_reg_num()
514 if (hdev->ae_dev->dev_specs.mac_stats_num) in hclge_mac_update_stats()
528 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_count()
542 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_stats()
545 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); in hclge_comm_get_stats()
563 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_strings()
578 handle = &hdev->vport[0].nic; in hclge_update_stats_for_all()
579 if (handle->client) { in hclge_update_stats_for_all()
580 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats_for_all()
582 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
592 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
599 struct hclge_dev *hdev = vport->back; in hclge_update_stats()
602 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) in hclge_update_stats()
607 dev_err(&hdev->pdev->dev, in hclge_update_stats()
611 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats()
613 dev_err(&hdev->pdev->dev, in hclge_update_stats()
617 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); in hclge_update_stats()
629 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count()
639 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); in hclge_get_sset_count()
640 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || in hclge_get_sset_count()
641 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || in hclge_get_sset_count()
642 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || in hclge_get_sset_count()
643 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { in hclge_get_sset_count()
645 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; in hclge_get_sset_count()
649 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; in hclge_get_sset_count()
651 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; in hclge_get_sset_count()
653 handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; in hclge_get_sset_count()
655 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && in hclge_get_sset_count()
656 hdev->hw.mac.phydev->drv->set_loopback) || in hclge_get_sset_count()
659 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; in hclge_get_sset_count()
674 struct hclge_dev *hdev = vport->back; in hclge_get_strings()
684 if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { in hclge_get_strings()
689 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { in hclge_get_strings()
694 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { in hclge_get_strings()
699 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { in hclge_get_strings()
705 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { in hclge_get_strings()
716 struct hclge_dev *hdev = vport->back; in hclge_get_stats()
728 struct hclge_dev *hdev = vport->back; in hclge_get_mac_stat()
732 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; in hclge_get_mac_stat()
733 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; in hclge_get_mac_stat()
741 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) in hclge_parse_func_status()
742 return -EINVAL; in hclge_parse_func_status()
745 if (status->pf_state & HCLGE_PF_STATE_MAIN) in hclge_parse_func_status()
746 hdev->flag |= HCLGE_FLAG_MAIN; in hclge_parse_func_status()
748 hdev->flag &= ~HCLGE_FLAG_MAIN; in hclge_parse_func_status()
750 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; in hclge_parse_func_status()
767 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_function_status()
769 dev_err(&hdev->pdev->dev, in hclge_query_function_status()
775 if (req->pf_state) in hclge_query_function_status()
790 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_pf_resource()
792 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
798 hdev->num_tqps = le16_to_cpu(req->tqp_num) + in hclge_query_pf_resource()
799 le16_to_cpu(req->ext_tqp_num); in hclge_query_pf_resource()
800 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
802 if (req->tx_buf_size) in hclge_query_pf_resource()
803 hdev->tx_buf_size = in hclge_query_pf_resource()
804 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
806 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; in hclge_query_pf_resource()
808 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
810 if (req->dv_buf_size) in hclge_query_pf_resource()
811 hdev->dv_buf_size = in hclge_query_pf_resource()
812 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
814 hdev->dv_buf_size = HCLGE_DEFAULT_DV; in hclge_query_pf_resource()
816 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
818 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); in hclge_query_pf_resource()
819 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { in hclge_query_pf_resource()
820 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
822 hdev->num_nic_msi); in hclge_query_pf_resource()
823 return -EINVAL; in hclge_query_pf_resource()
827 hdev->num_roce_msi = in hclge_query_pf_resource()
828 le16_to_cpu(req->pf_intr_vector_number_roce); in hclge_query_pf_resource()
833 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; in hclge_query_pf_resource()
835 hdev->num_msi = hdev->num_nic_msi; in hclge_query_pf_resource()
872 return -EINVAL; in hclge_parse_speed()
901 return -EINVAL; in hclge_get_speed_bit()
907 struct hclge_dev *hdev = vport->back; in hclge_check_port_speed()
908 u32 speed_ability = hdev->hw.mac.speed_ability; in hclge_check_port_speed()
919 return -EINVAL; in hclge_check_port_speed()
924 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); in hclge_update_fec_support()
925 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); in hclge_update_fec_support()
926 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); in hclge_update_fec_support()
927 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_update_fec_support()
929 if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) in hclge_update_fec_support()
931 mac->supported); in hclge_update_fec_support()
932 if (mac->fec_ability & BIT(HNAE3_FEC_RS)) in hclge_update_fec_support()
934 mac->supported); in hclge_update_fec_support()
935 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) in hclge_update_fec_support()
937 mac->supported); in hclge_update_fec_support()
938 if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) in hclge_update_fec_support()
940 mac->supported); in hclge_update_fec_support()
1040 if (mac->fec_ability) in hclge_convert_setting_fec()
1043 switch (mac->speed) { in hclge_convert_setting_fec()
1046 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | in hclge_convert_setting_fec()
1051 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | in hclge_convert_setting_fec()
1055 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | in hclge_convert_setting_fec()
1059 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | in hclge_convert_setting_fec()
1063 mac->fec_ability = 0; in hclge_convert_setting_fec()
1074 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_fiber_link_mode()
1078 mac->supported); in hclge_parse_fiber_link_mode()
1080 hclge_convert_setting_sr(speed_ability, mac->supported); in hclge_parse_fiber_link_mode()
1081 hclge_convert_setting_lr(speed_ability, mac->supported); in hclge_parse_fiber_link_mode()
1082 hclge_convert_setting_cr(speed_ability, mac->supported); in hclge_parse_fiber_link_mode()
1087 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1089 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1090 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1096 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_backplane_link_mode()
1098 hclge_convert_setting_kr(speed_ability, mac->supported); in hclge_parse_backplane_link_mode()
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1105 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1112 unsigned long *supported = hdev->hw.mac.supported; in hclge_parse_copper_link_mode()
1145 u8 media_type = hdev->hw.mac.media_type; in hclge_parse_link_mode()
1201 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), in hclge_parse_cfg()
1203 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), in hclge_parse_cfg()
1207 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1210 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1213 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1217 mac_addr_tmp = __le32_to_cpu(req->param[2]); in hclge_parse_cfg()
1218 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1224 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1227 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1232 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; in hclge_parse_cfg()
1235 cfg->numa_node_map = __le32_to_cpu(req->param[0]); in hclge_parse_cfg()
1237 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1240 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1243 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT; in hclge_parse_cfg()
1245 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1249 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1253 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), in hclge_parse_cfg()
1264 cfg->pf_rss_size_max = cfg->pf_rss_size_max ? in hclge_parse_cfg()
1265 1U << cfg->pf_rss_size_max : in hclge_parse_cfg()
1266 cfg->vf_rss_size_max; in hclge_parse_cfg()
1272 cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]), in hclge_parse_cfg()
1275 cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT; in hclge_parse_cfg()
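As a worked example of the decoding above, using hypothetical firmware values: a pf_rss_size_max field of 7 is stored as 1U << 7 = 128 RSS queues, while a field of 0 makes the PF fall back to vf_rss_size_max; likewise the tx_spare_buf_size field is a unit count that only becomes a byte size after the HCLGE_TX_SPARE_SIZE_UNIT multiplication on the last line above.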
1300 req->offset = cpu_to_le32(offset); in hclge_get_cfg()
1303 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); in hclge_get_cfg()
1305 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); in hclge_get_cfg()
1318 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_set_default_dev_specs()
1320 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; in hclge_set_default_dev_specs()
1321 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; in hclge_set_default_dev_specs()
1322 ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; in hclge_set_default_dev_specs()
1323 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; in hclge_set_default_dev_specs()
1324 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; in hclge_set_default_dev_specs()
1325 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; in hclge_set_default_dev_specs()
1326 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; in hclge_set_default_dev_specs()
1327 ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; in hclge_set_default_dev_specs()
1328 ae_dev->dev_specs.tnl_num = 0; in hclge_set_default_dev_specs()
1334 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_parse_dev_specs()
1341 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; in hclge_parse_dev_specs()
1342 ae_dev->dev_specs.rss_ind_tbl_size = in hclge_parse_dev_specs()
1343 le16_to_cpu(req0->rss_ind_tbl_size); in hclge_parse_dev_specs()
1344 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); in hclge_parse_dev_specs()
1345 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); in hclge_parse_dev_specs()
1346 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); in hclge_parse_dev_specs()
1347 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); in hclge_parse_dev_specs()
1348 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); in hclge_parse_dev_specs()
1349 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); in hclge_parse_dev_specs()
1350 ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); in hclge_parse_dev_specs()
1351 ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); in hclge_parse_dev_specs()
1352 ae_dev->dev_specs.tnl_num = req1->tnl_num; in hclge_parse_dev_specs()
1357 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; in hclge_check_dev_specs()
1359 if (!dev_specs->max_non_tso_bd_num) in hclge_check_dev_specs()
1360 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; in hclge_check_dev_specs()
1361 if (!dev_specs->rss_ind_tbl_size) in hclge_check_dev_specs()
1362 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; in hclge_check_dev_specs()
1363 if (!dev_specs->rss_key_size) in hclge_check_dev_specs()
1364 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; in hclge_check_dev_specs()
1365 if (!dev_specs->max_tm_rate) in hclge_check_dev_specs()
1366 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; in hclge_check_dev_specs()
1367 if (!dev_specs->max_qset_num) in hclge_check_dev_specs()
1368 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; in hclge_check_dev_specs()
1369 if (!dev_specs->max_int_gl) in hclge_check_dev_specs()
1370 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; in hclge_check_dev_specs()
1371 if (!dev_specs->max_frm_size) in hclge_check_dev_specs()
1372 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; in hclge_check_dev_specs()
1373 if (!dev_specs->umv_size) in hclge_check_dev_specs()
1374 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; in hclge_check_dev_specs()
1383 if (ret && ret != -EOPNOTSUPP) in hclge_query_mac_stats_num()
1386 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; in hclge_query_mac_stats_num()
1403 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_query_dev_specs()
1408 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { in hclge_query_dev_specs()
1415 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); in hclge_query_dev_specs()
1431 dev_err(&hdev->pdev->dev, in hclge_get_cap()
1448 dev_info(&hdev->pdev->dev, in hclge_init_kdump_kernel_config()
1452 hdev->num_tqps = hdev->num_req_vfs + 1; in hclge_init_kdump_kernel_config()
1453 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; in hclge_init_kdump_kernel_config()
1454 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; in hclge_init_kdump_kernel_config()
1461 if (hdev->tc_max > HNAE3_MAX_TC || in hclge_init_tc_config()
1462 hdev->tc_max < 1) { in hclge_init_tc_config()
1463 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", in hclge_init_tc_config()
1464 hdev->tc_max); in hclge_init_tc_config()
1465 hdev->tc_max = 1; in hclge_init_tc_config()
1470 hdev->tc_max = 1; in hclge_init_tc_config()
1471 hdev->pfc_max = 0; in hclge_init_tc_config()
1473 hdev->pfc_max = hdev->tc_max; in hclge_init_tc_config()
1476 hdev->tm_info.num_tc = 1; in hclge_init_tc_config()
1479 for (i = 0; i < hdev->tm_info.num_tc; i++) in hclge_init_tc_config()
1480 hnae3_set_bit(hdev->hw_tc_map, i, 1); in hclge_init_tc_config()
1482 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; in hclge_init_tc_config()
1487 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_configure()
1495 hdev->base_tqp_pid = 0; in hclge_configure()
1496 hdev->vf_rss_size_max = cfg.vf_rss_size_max; in hclge_configure()
1497 hdev->pf_rss_size_max = cfg.pf_rss_size_max; in hclge_configure()
1498 hdev->rx_buf_len = cfg.rx_buf_len; in hclge_configure()
1499 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); in hclge_configure()
1500 hdev->hw.mac.media_type = cfg.media_type; in hclge_configure()
1501 hdev->hw.mac.phy_addr = cfg.phy_addr; in hclge_configure()
1502 hdev->num_tx_desc = cfg.tqp_desc_num; in hclge_configure()
1503 hdev->num_rx_desc = cfg.tqp_desc_num; in hclge_configure()
1504 hdev->tm_info.num_pg = 1; in hclge_configure()
1505 hdev->tc_max = cfg.tc_num; in hclge_configure()
1506 hdev->tm_info.hw_pfc_map = 0; in hclge_configure()
1508 hdev->wanted_umv_size = cfg.umv_space; in hclge_configure()
1510 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; in hclge_configure()
1511 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; in hclge_configure()
1512 hdev->gro_en = true; in hclge_configure()
1514 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); in hclge_configure()
1516 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_configure()
1517 hdev->fd_en = true; in hclge_configure()
1518 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_configure()
1521 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); in hclge_configure()
1523 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", in hclge_configure()
1530 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); in hclge_configure()
1547 req->tso_mss_min = cpu_to_le16(tso_mss_min); in hclge_config_tso()
1548 req->tso_mss_max = cpu_to_le16(tso_mss_max); in hclge_config_tso()
1550 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_tso()
1559 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) in hclge_config_gro()
1565 req->gro_en = hdev->gro_en ? 1 : 0; in hclge_config_gro()
1567 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_gro()
1569 dev_err(&hdev->pdev->dev, in hclge_config_gro()
1577 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_alloc_tqps()
1581 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclge_alloc_tqps()
1583 if (!hdev->htqp) in hclge_alloc_tqps()
1584 return -ENOMEM; in hclge_alloc_tqps()
1586 tqp = hdev->htqp; in hclge_alloc_tqps()
1588 for (i = 0; i < hdev->num_tqps; i++) { in hclge_alloc_tqps()
1589 tqp->dev = &hdev->pdev->dev; in hclge_alloc_tqps()
1590 tqp->index = i; in hclge_alloc_tqps()
1592 tqp->q.ae_algo = &ae_algo; in hclge_alloc_tqps()
1593 tqp->q.buf_size = hdev->rx_buf_len; in hclge_alloc_tqps()
1594 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclge_alloc_tqps()
1595 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclge_alloc_tqps()
1601 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1605 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1608 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * in hclge_alloc_tqps()
1615 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) in hclge_alloc_tqps()
1616 tqp->q.mem_base = hdev->hw.hw.mem_base + in hclge_alloc_tqps()
1635 req->tqp_id = cpu_to_le16(tqp_pid); in hclge_map_tqps_to_func()
1636 req->tqp_vf = func_id; in hclge_map_tqps_to_func()
1637 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; in hclge_map_tqps_to_func()
1639 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; in hclge_map_tqps_to_func()
1640 req->tqp_vid = cpu_to_le16(tqp_vid); in hclge_map_tqps_to_func()
1642 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_map_tqps_to_func()
1644 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); in hclge_map_tqps_to_func()
1651 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_assign_tqp()
1652 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp()
1655 for (i = 0, alloced = 0; i < hdev->num_tqps && in hclge_assign_tqp()
1657 if (!hdev->htqp[i].alloced) { in hclge_assign_tqp()
1658 hdev->htqp[i].q.handle = &vport->nic; in hclge_assign_tqp()
1659 hdev->htqp[i].q.tqp_index = alloced; in hclge_assign_tqp()
1660 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; in hclge_assign_tqp()
1661 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; in hclge_assign_tqp()
1662 kinfo->tqp[alloced] = &hdev->htqp[i].q; in hclge_assign_tqp()
1663 hdev->htqp[i].alloced = true; in hclge_assign_tqp()
1667 vport->alloc_tqps = alloced; in hclge_assign_tqp()
1668 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, in hclge_assign_tqp()
1669 vport->alloc_tqps / hdev->tm_info.num_tc); in hclge_assign_tqp()
1672 kinfo->rss_size = min_t(u16, kinfo->rss_size, in hclge_assign_tqp()
1673 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); in hclge_assign_tqp()
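The two min_t() clamps above bound the per-TC RSS size first by the hardware maximum and the queues actually assigned, then by the interrupt budget. With hypothetical numbers (8 assigned TQPs, 4 TCs, a pf_rss_size_max of 16 and 17 NIC MSI vectors), the first clamp gives min(16, 8 / 4) = 2 and the second clamp of (17 - 1) / 4 = 4 leaves rss_size at 2.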
1682 struct hnae3_handle *nic = &vport->nic; in hclge_knic_setup()
1683 struct hnae3_knic_private_info *kinfo = &nic->kinfo; in hclge_knic_setup()
1684 struct hclge_dev *hdev = vport->back; in hclge_knic_setup()
1687 kinfo->num_tx_desc = num_tx_desc; in hclge_knic_setup()
1688 kinfo->num_rx_desc = num_rx_desc; in hclge_knic_setup()
1690 kinfo->rx_buf_len = hdev->rx_buf_len; in hclge_knic_setup()
1691 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; in hclge_knic_setup()
1693 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, in hclge_knic_setup()
1695 if (!kinfo->tqp) in hclge_knic_setup()
1696 return -ENOMEM; in hclge_knic_setup()
1700 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); in hclge_knic_setup()
1708 struct hnae3_handle *nic = &vport->nic; in hclge_map_tqp_to_vport()
1712 kinfo = &nic->kinfo; in hclge_map_tqp_to_vport()
1713 for (i = 0; i < vport->alloc_tqps; i++) { in hclge_map_tqp_to_vport()
1715 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); in hclge_map_tqp_to_vport()
1719 is_pf = !(vport->vport_id); in hclge_map_tqp_to_vport()
1720 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, in hclge_map_tqp_to_vport()
1731 struct hclge_vport *vport = hdev->vport; in hclge_map_tqp()
1734 num_vport = hdev->num_req_vfs + 1; in hclge_map_tqp()
1750 struct hnae3_handle *nic = &vport->nic; in hclge_vport_setup()
1751 struct hclge_dev *hdev = vport->back; in hclge_vport_setup()
1754 nic->pdev = hdev->pdev; in hclge_vport_setup()
1755 nic->ae_algo = &ae_algo; in hclge_vport_setup()
1756 nic->numa_node_mask = hdev->numa_node_mask; in hclge_vport_setup()
1757 nic->kinfo.io_base = hdev->hw.hw.io_base; in hclge_vport_setup()
1760 hdev->num_tx_desc, hdev->num_rx_desc); in hclge_vport_setup()
1762 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); in hclge_vport_setup()
1769 struct pci_dev *pdev = hdev->pdev; in hclge_alloc_vport()
1777 num_vport = hdev->num_req_vfs + 1; in hclge_alloc_vport()
1779 if (hdev->num_tqps < num_vport) { in hclge_alloc_vport()
1780 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", in hclge_alloc_vport()
1781 hdev->num_tqps, num_vport); in hclge_alloc_vport()
1782 return -EINVAL; in hclge_alloc_vport()
1786 tqp_per_vport = hdev->num_tqps / num_vport; in hclge_alloc_vport()
1787 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; in hclge_alloc_vport()
1789 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), in hclge_alloc_vport()
1792 return -ENOMEM; in hclge_alloc_vport()
1794 hdev->vport = vport; in hclge_alloc_vport()
1795 hdev->num_alloc_vport = num_vport; in hclge_alloc_vport()
1798 hdev->num_alloc_vfs = hdev->num_req_vfs; in hclge_alloc_vport()
1801 vport->back = hdev; in hclge_alloc_vport()
1802 vport->vport_id = i; in hclge_alloc_vport()
1803 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; in hclge_alloc_vport()
1804 vport->mps = HCLGE_MAC_DEFAULT_FRAME; in hclge_alloc_vport()
1805 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; in hclge_alloc_vport()
1806 vport->port_base_vlan_cfg.tbl_sta = true; in hclge_alloc_vport()
1807 vport->rxvlan_cfg.rx_vlan_offload_en = true; in hclge_alloc_vport()
1808 vport->req_vlan_fltr_en = true; in hclge_alloc_vport()
1809 INIT_LIST_HEAD(&vport->vlan_list); in hclge_alloc_vport()
1810 INIT_LIST_HEAD(&vport->uc_mac_list); in hclge_alloc_vport()
1811 INIT_LIST_HEAD(&vport->mc_mac_list); in hclge_alloc_vport()
1812 spin_lock_init(&vport->mac_list_lock); in hclge_alloc_vport()
1819 dev_err(&pdev->dev, in hclge_alloc_vport()
1846 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; in hclge_cmd_alloc_tx_buff()
1848 req->tx_pkt_buff[i] = in hclge_cmd_alloc_tx_buff()
1853 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_alloc_tx_buff()
1855 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", in hclge_cmd_alloc_tx_buff()
1867 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); in hclge_tx_buffer_alloc()
1878 if (hdev->hw_tc_map & BIT(i)) in hclge_get_tc_num()
1892 priv = &buf_alloc->priv_buf[i]; in hclge_get_pfc_priv_num()
1893 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_pfc_priv_num()
1894 priv->enable) in hclge_get_pfc_priv_num()
1910 priv = &buf_alloc->priv_buf[i]; in hclge_get_no_pfc_priv_num()
1911 if (hdev->hw_tc_map & BIT(i) && in hclge_get_no_pfc_priv_num()
1912 !(hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_no_pfc_priv_num()
1913 priv->enable) in hclge_get_no_pfc_priv_num()
1927 priv = &buf_alloc->priv_buf[i]; in hclge_get_rx_priv_buff_alloced()
1928 if (priv->enable) in hclge_get_rx_priv_buff_alloced()
1929 rx_priv += priv->buf_size; in hclge_get_rx_priv_buff_alloced()
1939 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; in hclge_get_tx_buff_alloced()
1954 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
1958 hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1961 + hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1971 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
1972 buf_alloc->s_buf.buf_size = shared_buf; in hclge_is_rx_buf_ok()
1974 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1975 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high in hclge_is_rx_buf_ok()
1976 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, in hclge_is_rx_buf_ok()
1979 buf_alloc->s_buf.self.high = aligned_mps + in hclge_is_rx_buf_ok()
1981 buf_alloc->s_buf.self.low = aligned_mps; in hclge_is_rx_buf_ok()
1985 hi_thrd = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1996 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; in hclge_is_rx_buf_ok()
2003 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; in hclge_is_rx_buf_ok()
2004 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; in hclge_is_rx_buf_ok()
2015 total_size = hdev->pkt_buf_size; in hclge_tx_buffer_calc()
2019 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_tx_buffer_calc()
2021 if (hdev->hw_tc_map & BIT(i)) { in hclge_tx_buffer_calc()
2022 if (total_size < hdev->tx_buf_size) in hclge_tx_buffer_calc()
2023 return -ENOMEM; in hclge_tx_buffer_calc()
2025 priv->tx_buf_size = hdev->tx_buf_size; in hclge_tx_buffer_calc()
2027 priv->tx_buf_size = 0; in hclge_tx_buffer_calc()
2030 total_size -= priv->tx_buf_size; in hclge_tx_buffer_calc()
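To illustrate the TX buffer loop above with hypothetical sizes: given a 16 KiB packet buffer, four TCs set in hw_tc_map and a 2 KiB hdev->tx_buf_size, each enabled TC reserves 2 KiB (disabled TCs reserve 0), so 8 KiB is subtracted from total_size and the remainder is what the RX buffer calculation further down gets to distribute; if total_size ever drops below tx_buf_size for an enabled TC, the function returns -ENOMEM.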
2039 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buf_calc_all()
2040 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_rx_buf_calc_all()
2044 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_rx_buf_calc_all()
2046 priv->enable = 0; in hclge_rx_buf_calc_all()
2047 priv->wl.low = 0; in hclge_rx_buf_calc_all()
2048 priv->wl.high = 0; in hclge_rx_buf_calc_all()
2049 priv->buf_size = 0; in hclge_rx_buf_calc_all()
2051 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rx_buf_calc_all()
2054 priv->enable = 1; in hclge_rx_buf_calc_all()
2056 if (hdev->tm_info.hw_pfc_map & BIT(i)) { in hclge_rx_buf_calc_all()
2057 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; in hclge_rx_buf_calc_all()
2058 priv->wl.high = roundup(priv->wl.low + aligned_mps, in hclge_rx_buf_calc_all()
2061 priv->wl.low = 0; in hclge_rx_buf_calc_all()
2062 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : in hclge_rx_buf_calc_all()
2066 priv->buf_size = priv->wl.high + hdev->dv_buf_size; in hclge_rx_buf_calc_all()
2075 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2080 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { in hclge_drop_nopfc_buf_till_fit()
2081 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_drop_nopfc_buf_till_fit()
2084 if (hdev->hw_tc_map & mask && in hclge_drop_nopfc_buf_till_fit()
2085 !(hdev->tm_info.hw_pfc_map & mask)) { in hclge_drop_nopfc_buf_till_fit()
2087 priv->wl.low = 0; in hclge_drop_nopfc_buf_till_fit()
2088 priv->wl.high = 0; in hclge_drop_nopfc_buf_till_fit()
2089 priv->buf_size = 0; in hclge_drop_nopfc_buf_till_fit()
2090 priv->enable = 0; in hclge_drop_nopfc_buf_till_fit()
2091 no_pfc_priv_num--; in hclge_drop_nopfc_buf_till_fit()
2105 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_pfc_buf_till_fit()
2110 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { in hclge_drop_pfc_buf_till_fit()
2111 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_drop_pfc_buf_till_fit()
2114 if (hdev->hw_tc_map & mask && in hclge_drop_pfc_buf_till_fit()
2115 hdev->tm_info.hw_pfc_map & mask) { in hclge_drop_pfc_buf_till_fit()
2117 priv->wl.low = 0; in hclge_drop_pfc_buf_till_fit()
2118 priv->enable = 0; in hclge_drop_pfc_buf_till_fit()
2119 priv->wl.high = 0; in hclge_drop_pfc_buf_till_fit()
2120 priv->buf_size = 0; in hclge_drop_pfc_buf_till_fit()
2121 pfc_priv_num--; in hclge_drop_pfc_buf_till_fit()
2139 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_only_alloc_priv_buff()
2141 u32 half_mps = hdev->mps >> 1; in hclge_only_alloc_priv_buff()
2151 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + in hclge_only_alloc_priv_buff()
2159 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_only_alloc_priv_buff()
2161 priv->enable = 0; in hclge_only_alloc_priv_buff()
2162 priv->wl.low = 0; in hclge_only_alloc_priv_buff()
2163 priv->wl.high = 0; in hclge_only_alloc_priv_buff()
2164 priv->buf_size = 0; in hclge_only_alloc_priv_buff()
2166 if (!(hdev->hw_tc_map & BIT(i))) in hclge_only_alloc_priv_buff()
2169 priv->enable = 1; in hclge_only_alloc_priv_buff()
2170 priv->buf_size = rx_priv; in hclge_only_alloc_priv_buff()
2171 priv->wl.high = rx_priv - hdev->dv_buf_size; in hclge_only_alloc_priv_buff()
2172 priv->wl.low = priv->wl.high - PRIV_WL_GAP; in hclge_only_alloc_priv_buff()
2175 buf_alloc->s_buf.buf_size = 0; in hclge_only_alloc_priv_buff()
2190 u32 rx_all = hdev->pkt_buf_size; in hclge_rx_buffer_calc()
2192 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buffer_calc()
2194 return -ENOMEM; in hclge_rx_buffer_calc()
2215 return -ENOMEM; in hclge_rx_buffer_calc()
2231 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_rx_priv_buf_alloc()
2233 req->buf_num[i] = in hclge_rx_priv_buf_alloc()
2234 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_buf_alloc()
2235 req->buf_num[i] |= in hclge_rx_priv_buf_alloc()
2239 req->shared_buf = in hclge_rx_priv_buf_alloc()
2240 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | in hclge_rx_priv_buf_alloc()
2243 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_rx_priv_buf_alloc()
2245 dev_err(&hdev->pdev->dev, in hclge_rx_priv_buf_alloc()
2274 priv = &buf_alloc->priv_buf[idx]; in hclge_rx_priv_wl_config()
2275 req->tc_wl[j].high = in hclge_rx_priv_wl_config()
2276 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_wl_config()
2277 req->tc_wl[j].high |= in hclge_rx_priv_wl_config()
2279 req->tc_wl[j].low = in hclge_rx_priv_wl_config()
2280 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_wl_config()
2281 req->tc_wl[j].low |= in hclge_rx_priv_wl_config()
2287 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_rx_priv_wl_config()
2289 dev_err(&hdev->pdev->dev, in hclge_rx_priv_wl_config()
2298 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; in hclge_common_thrd_config()
2317 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; in hclge_common_thrd_config()
2319 req->com_thrd[j].high = in hclge_common_thrd_config()
2320 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); in hclge_common_thrd_config()
2321 req->com_thrd[j].high |= in hclge_common_thrd_config()
2323 req->com_thrd[j].low = in hclge_common_thrd_config()
2324 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); in hclge_common_thrd_config()
2325 req->com_thrd[j].low |= in hclge_common_thrd_config()
2331 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_common_thrd_config()
2333 dev_err(&hdev->pdev->dev, in hclge_common_thrd_config()
2341 struct hclge_shared_buf *buf = &buf_alloc->s_buf; in hclge_common_wl_config()
2349 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); in hclge_common_wl_config()
2350 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); in hclge_common_wl_config()
2352 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); in hclge_common_wl_config()
2353 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); in hclge_common_wl_config()
2355 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_common_wl_config()
2357 dev_err(&hdev->pdev->dev, in hclge_common_wl_config()
2370 return -ENOMEM; in hclge_buffer_alloc()
2374 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2381 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2388 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2396 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", in hclge_buffer_alloc()
2404 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2412 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2421 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2431 struct hnae3_handle *roce = &vport->roce; in hclge_init_roce_base_info()
2432 struct hnae3_handle *nic = &vport->nic; in hclge_init_roce_base_info()
2433 struct hclge_dev *hdev = vport->back; in hclge_init_roce_base_info()
2435 roce->rinfo.num_vectors = vport->back->num_roce_msi; in hclge_init_roce_base_info()
2437 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) in hclge_init_roce_base_info()
2438 return -EINVAL; in hclge_init_roce_base_info()
2440 roce->rinfo.base_vector = hdev->num_nic_msi; in hclge_init_roce_base_info()
2442 roce->rinfo.netdev = nic->kinfo.netdev; in hclge_init_roce_base_info()
2443 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; in hclge_init_roce_base_info()
2444 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; in hclge_init_roce_base_info()
2446 roce->pdev = nic->pdev; in hclge_init_roce_base_info()
2447 roce->ae_algo = nic->ae_algo; in hclge_init_roce_base_info()
2448 roce->numa_node_mask = nic->numa_node_mask; in hclge_init_roce_base_info()
2455 struct pci_dev *pdev = hdev->pdev; in hclge_init_msi()
2460 hdev->num_msi, in hclge_init_msi()
2463 dev_err(&pdev->dev, in hclge_init_msi()
2464 "failed(%d) to allocate MSI/MSI-X vectors\n", in hclge_init_msi()
2468 if (vectors < hdev->num_msi) in hclge_init_msi()
2469 dev_warn(&hdev->pdev->dev, in hclge_init_msi()
2470 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", in hclge_init_msi()
2471 hdev->num_msi, vectors); in hclge_init_msi()
2473 hdev->num_msi = vectors; in hclge_init_msi()
2474 hdev->num_msi_left = vectors; in hclge_init_msi()
2476 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2478 if (!hdev->vector_status) { in hclge_init_msi()
2480 return -ENOMEM; in hclge_init_msi()
2483 for (i = 0; i < hdev->num_msi; i++) in hclge_init_msi()
2484 hdev->vector_status[i] = HCLGE_INVALID_VPORT; in hclge_init_msi()
2486 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2488 if (!hdev->vector_irq) { in hclge_init_msi()
2490 return -ENOMEM; in hclge_init_msi()
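The fragments above show the usual MSI bring-up: request hdev->num_msi vectors, accept a smaller grant with a warning, then size the vector_status and vector_irq bookkeeping arrays to what was actually granted. Hypothetically, if 64 vectors were requested but only 48 were allocated, num_msi and num_msi_left both become 48 and the two devm_kcalloc() arrays hold 48 entries.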
2527 return -EINVAL; in hclge_convert_to_fw_speed()
2543 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); in hclge_cfg_mac_speed_dup_hw()
2547 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); in hclge_cfg_mac_speed_dup_hw()
2551 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, in hclge_cfg_mac_speed_dup_hw()
2553 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, in hclge_cfg_mac_speed_dup_hw()
2555 req->lane_num = lane_num; in hclge_cfg_mac_speed_dup_hw()
2557 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_speed_dup_hw()
2559 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_speed_dup_hw()
2569 struct hclge_mac *mac = &hdev->hw.mac; in hclge_cfg_mac_speed_dup()
2573 if (!mac->support_autoneg && mac->speed == speed && in hclge_cfg_mac_speed_dup()
2574 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) in hclge_cfg_mac_speed_dup()
2581 hdev->hw.mac.speed = speed; in hclge_cfg_mac_speed_dup()
2582 hdev->hw.mac.duplex = duplex; in hclge_cfg_mac_speed_dup()
2584 hdev->hw.mac.lane_num = lane_num; in hclge_cfg_mac_speed_dup()
2593 struct hclge_dev *hdev = vport->back; in hclge_cfg_mac_speed_dup_h()
2610 req->cfg_an_cmd_flag = cpu_to_le32(flag); in hclge_set_autoneg_en()
2612 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_autoneg_en()
2614 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", in hclge_set_autoneg_en()
2623 struct hclge_dev *hdev = vport->back; in hclge_set_autoneg()
2625 if (!hdev->hw.mac.support_autoneg) { in hclge_set_autoneg()
2627 dev_err(&hdev->pdev->dev, in hclge_set_autoneg()
2629 return -EOPNOTSUPP; in hclge_set_autoneg()
2641 struct hclge_dev *hdev = vport->back; in hclge_get_autoneg()
2642 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_autoneg()
2645 return phydev->autoneg; in hclge_get_autoneg()
2647 return hdev->hw.mac.autoneg; in hclge_get_autoneg()
2653 struct hclge_dev *hdev = vport->back; in hclge_restart_autoneg()
2656 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); in hclge_restart_autoneg()
2667 struct hclge_dev *hdev = vport->back; in hclge_halt_autoneg()
2669 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) in hclge_halt_autoneg()
2692 hdev->fec_stats.per_lanes[i] += in hclge_parse_fec_stats_lanes()
2705 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; in hclge_parse_fec_stats()
2706 hdev->fec_stats.rs_corr_blocks += in hclge_parse_fec_stats()
2707 le32_to_cpu(req->rs_fec_corr_blocks); in hclge_parse_fec_stats()
2708 hdev->fec_stats.rs_uncorr_blocks += in hclge_parse_fec_stats()
2709 le32_to_cpu(req->rs_fec_uncorr_blocks); in hclge_parse_fec_stats()
2710 hdev->fec_stats.rs_error_blocks += in hclge_parse_fec_stats()
2711 le32_to_cpu(req->rs_fec_error_blocks); in hclge_parse_fec_stats()
2712 hdev->fec_stats.base_r_corr_blocks += in hclge_parse_fec_stats()
2713 le32_to_cpu(req->base_r_fec_corr_blocks); in hclge_parse_fec_stats()
2714 hdev->fec_stats.base_r_uncorr_blocks += in hclge_parse_fec_stats()
2715 le32_to_cpu(req->base_r_fec_uncorr_blocks); in hclge_parse_fec_stats()
2717 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); in hclge_parse_fec_stats()
2729 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) in hclge_update_fec_stats_hw()
2733 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2744 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_fec_stats()
2748 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) in hclge_update_fec_stats()
2753 dev_err(&hdev->pdev->dev, in hclge_update_fec_stats()
2756 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); in hclge_update_fec_stats()
2762 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; in hclge_get_fec_stats_total()
2763 fec_stats->uncorrectable_blocks.total = in hclge_get_fec_stats_total()
2764 hdev->fec_stats.rs_uncorr_blocks; in hclge_get_fec_stats_total()
2772 if (hdev->fec_stats.base_r_lane_num == 0 || in hclge_get_fec_stats_lanes()
2773 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { in hclge_get_fec_stats_lanes()
2774 dev_err(&hdev->pdev->dev, in hclge_get_fec_stats_lanes()
2776 hdev->fec_stats.base_r_lane_num); in hclge_get_fec_stats_lanes()
2780 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { in hclge_get_fec_stats_lanes()
2781 fec_stats->corrected_blocks.lanes[i] = in hclge_get_fec_stats_lanes()
2782 hdev->fec_stats.base_r_corr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2783 fec_stats->uncorrectable_blocks.lanes[i] = in hclge_get_fec_stats_lanes()
2784 hdev->fec_stats.base_r_uncorr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2791 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_comm_get_fec_stats()
2802 dev_err(&hdev->pdev->dev, in hclge_comm_get_fec_stats()
2813 struct hclge_dev *hdev = vport->back; in hclge_get_fec_stats()
2814 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_get_fec_stats()
2836 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); in hclge_set_fec_hw()
2838 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2841 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2844 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2847 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fec_hw()
2849 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); in hclge_set_fec_hw()
2857 struct hclge_dev *hdev = vport->back; in hclge_set_fec()
2858 struct hclge_mac *mac = &hdev->hw.mac; in hclge_set_fec()
2861 if (fec_mode && !(mac->fec_ability & fec_mode)) { in hclge_set_fec()
2862 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); in hclge_set_fec()
2863 return -EINVAL; in hclge_set_fec()
2870 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); in hclge_set_fec()
2878 struct hclge_dev *hdev = vport->back; in hclge_get_fec()
2879 struct hclge_mac *mac = &hdev->hw.mac; in hclge_get_fec()
2882 *fec_ability = mac->fec_ability; in hclge_get_fec()
2884 *fec_mode = mac->fec_mode; in hclge_get_fec()
2889 struct hclge_mac *mac = &hdev->hw.mac; in hclge_mac_init()
2892 hdev->support_sfp_query = true; in hclge_mac_init()
2893 hdev->hw.mac.duplex = HCLGE_MAC_FULL; in hclge_mac_init()
2894 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, in hclge_mac_init()
2895 hdev->hw.mac.duplex, hdev->hw.mac.lane_num); in hclge_mac_init()
2899 if (hdev->hw.mac.support_autoneg) { in hclge_mac_init()
2900 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); in hclge_mac_init()
2905 mac->link = 0; in hclge_mac_init()
2907 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { in hclge_mac_init()
2908 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); in hclge_mac_init()
2913 ret = hclge_set_mac_mtu(hdev, hdev->mps); in hclge_mac_init()
2915 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); in hclge_mac_init()
2925 dev_err(&hdev->pdev->dev, in hclge_mac_init()
2933 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_mbx_task_schedule()
2934 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { in hclge_mbx_task_schedule()
2935 hdev->last_mbx_scheduled = jiffies; in hclge_mbx_task_schedule()
2936 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_mbx_task_schedule()
2942 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_reset_task_schedule()
2943 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && in hclge_reset_task_schedule()
2944 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { in hclge_reset_task_schedule()
2945 hdev->last_rst_scheduled = jiffies; in hclge_reset_task_schedule()
2946 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_reset_task_schedule()
2952 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_errhand_task_schedule()
2953 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_task_schedule()
2954 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_errhand_task_schedule()
2959 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_task_schedule()
2960 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_task_schedule()
2961 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); in hclge_task_schedule()
2971 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_mac_link_status()
2973 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", in hclge_get_mac_link_status()
2979 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? in hclge_get_mac_link_status()
2987 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mac_phy_link()
2991 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) in hclge_get_mac_phy_link()
2994 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) in hclge_get_mac_phy_link()
3006 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { in hclge_push_link_status()
3007 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_push_link_status()
3009 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || in hclge_push_link_status()
3010 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) in hclge_push_link_status()
3015 dev_err(&hdev->pdev->dev, in hclge_push_link_status()
3024 struct hnae3_handle *rhandle = &hdev->vport[0].roce; in hclge_update_link_status()
3025 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_update_link_status()
3026 struct hnae3_client *rclient = hdev->roce_client; in hclge_update_link_status()
3027 struct hnae3_client *client = hdev->nic_client; in hclge_update_link_status()
3034 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) in hclge_update_link_status()
3039 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3043 if (state != hdev->hw.mac.link) { in hclge_update_link_status()
3044 hdev->hw.mac.link = state; in hclge_update_link_status()
3048 client->ops->link_status_change(handle, state); in hclge_update_link_status()
3050 if (rclient && rclient->ops->link_status_change) in hclge_update_link_status()
3051 rclient->ops->link_status_change(rhandle, state); in hclge_update_link_status()
3056 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3063 if (hclge_get_speed_bit(mac->speed, &speed_ability)) in hclge_update_speed_advertising()
3066 switch (mac->module_type) { in hclge_update_speed_advertising()
3068 hclge_convert_setting_lr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3072 hclge_convert_setting_sr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3075 hclge_convert_setting_cr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3078 hclge_convert_setting_kr(speed_ability, mac->advertising); in hclge_update_speed_advertising()
3087 if (mac->fec_mode & BIT(HNAE3_FEC_RS)) in hclge_update_fec_advertising()
3089 mac->advertising); in hclge_update_fec_advertising()
3090 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) in hclge_update_fec_advertising()
3092 mac->advertising); in hclge_update_fec_advertising()
3093 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) in hclge_update_fec_advertising()
3095 mac->advertising); in hclge_update_fec_advertising()
3098 mac->advertising); in hclge_update_fec_advertising()
3103 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_pause_advertising()
3106 switch (hdev->fc_mode_last_time) { in hclge_update_pause_advertising()
3125 linkmode_set_pause(mac->advertising, tx_en, rx_en); in hclge_update_pause_advertising()
3130 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_advertising()
3132 linkmode_zero(mac->advertising); in hclge_update_advertising()
3147 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && in hclge_update_port_capability()
3148 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) in hclge_update_port_capability()
3149 mac->module_type = HNAE3_MODULE_TYPE_KR; in hclge_update_port_capability()
3150 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) in hclge_update_port_capability()
3151 mac->module_type = HNAE3_MODULE_TYPE_TP; in hclge_update_port_capability()
3153 if (mac->support_autoneg) { in hclge_update_port_capability()
3154 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); in hclge_update_port_capability()
3155 linkmode_copy(mac->advertising, mac->supported); in hclge_update_port_capability()
3158 mac->supported); in hclge_update_port_capability()
3171 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_speed()
3172 if (ret == -EOPNOTSUPP) { in hclge_get_sfp_speed()
3173 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_speed()
3177 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); in hclge_get_sfp_speed()
3181 *speed = le32_to_cpu(resp->speed); in hclge_get_sfp_speed()
3195 resp->query_type = QUERY_ACTIVE_SPEED; in hclge_get_sfp_info()
3197 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_info()
3198 if (ret == -EOPNOTSUPP) { in hclge_get_sfp_info()
3199 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_info()
3203 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); in hclge_get_sfp_info()
3208 * set to mac->speed. in hclge_get_sfp_info()
3210 if (!le32_to_cpu(resp->speed)) in hclge_get_sfp_info()
3213 mac->speed = le32_to_cpu(resp->speed); in hclge_get_sfp_info()
3214 /* if resp->speed_ability is 0, it means it's an old version in hclge_get_sfp_info()
3217 if (resp->speed_ability) { in hclge_get_sfp_info()
3218 mac->module_type = le32_to_cpu(resp->module_type); in hclge_get_sfp_info()
3219 mac->speed_ability = le32_to_cpu(resp->speed_ability); in hclge_get_sfp_info()
3220 mac->autoneg = resp->autoneg; in hclge_get_sfp_info()
3221 mac->support_autoneg = resp->autoneg_ability; in hclge_get_sfp_info()
3222 mac->speed_type = QUERY_ACTIVE_SPEED; in hclge_get_sfp_info()
3223 mac->lane_num = resp->lane_num; in hclge_get_sfp_info()
3224 if (!resp->active_fec) in hclge_get_sfp_info()
3225 mac->fec_mode = 0; in hclge_get_sfp_info()
3227 mac->fec_mode = BIT(resp->active_fec); in hclge_get_sfp_info()
3228 mac->fec_ability = resp->fec_ability; in hclge_get_sfp_info()
3230 mac->speed_type = QUERY_SFP_SPEED; in hclge_get_sfp_info()
3243 u32 supported, advertising, lp_advertising; in hclge_get_phy_link_ksettings() local
3244 struct hclge_dev *hdev = vport->back; in hclge_get_phy_link_ksettings()
3253 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_get_phy_link_ksettings()
3255 dev_err(&hdev->pdev->dev, in hclge_get_phy_link_ksettings()
3261 cmd->base.autoneg = req0->autoneg; in hclge_get_phy_link_ksettings()
3262 cmd->base.speed = le32_to_cpu(req0->speed); in hclge_get_phy_link_ksettings()
3263 cmd->base.duplex = req0->duplex; in hclge_get_phy_link_ksettings()
3264 cmd->base.port = req0->port; in hclge_get_phy_link_ksettings()
3265 cmd->base.transceiver = req0->transceiver; in hclge_get_phy_link_ksettings()
3266 cmd->base.phy_address = req0->phy_address; in hclge_get_phy_link_ksettings()
3267 cmd->base.eth_tp_mdix = req0->eth_tp_mdix; in hclge_get_phy_link_ksettings()
3268 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; in hclge_get_phy_link_ksettings()
3269 supported = le32_to_cpu(req0->supported); in hclge_get_phy_link_ksettings()
3270 advertising = le32_to_cpu(req0->advertising); in hclge_get_phy_link_ksettings()
3271 lp_advertising = le32_to_cpu(req0->lp_advertising); in hclge_get_phy_link_ksettings()
3272 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in hclge_get_phy_link_ksettings()
3274 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in hclge_get_phy_link_ksettings()
3275 advertising); in hclge_get_phy_link_ksettings()
3276 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, in hclge_get_phy_link_ksettings()
3280 cmd->base.master_slave_cfg = req1->master_slave_cfg; in hclge_get_phy_link_ksettings()
3281 cmd->base.master_slave_state = req1->master_slave_state; in hclge_get_phy_link_ksettings()
3294 struct hclge_dev *hdev = vport->back; in hclge_set_phy_link_ksettings()
3295 u32 advertising; in hclge_set_phy_link_ksettings() local
3298 if (cmd->base.autoneg == AUTONEG_DISABLE && in hclge_set_phy_link_ksettings()
3299 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || in hclge_set_phy_link_ksettings()
3300 (cmd->base.duplex != DUPLEX_HALF && in hclge_set_phy_link_ksettings()
3301 cmd->base.duplex != DUPLEX_FULL))) in hclge_set_phy_link_ksettings()
3302 return -EINVAL; in hclge_set_phy_link_ksettings()
3311 req0->autoneg = cmd->base.autoneg; in hclge_set_phy_link_ksettings()
3312 req0->speed = cpu_to_le32(cmd->base.speed); in hclge_set_phy_link_ksettings()
3313 req0->duplex = cmd->base.duplex; in hclge_set_phy_link_ksettings()
3314 ethtool_convert_link_mode_to_legacy_u32(&advertising, in hclge_set_phy_link_ksettings()
3315 cmd->link_modes.advertising); in hclge_set_phy_link_ksettings()
3316 req0->advertising = cpu_to_le32(advertising); in hclge_set_phy_link_ksettings()
3317 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; in hclge_set_phy_link_ksettings()
3320 req1->master_slave_cfg = cmd->base.master_slave_cfg; in hclge_set_phy_link_ksettings()
3322 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_set_phy_link_ksettings()
3324 dev_err(&hdev->pdev->dev, in hclge_set_phy_link_ksettings()
3329 hdev->hw.mac.autoneg = cmd->base.autoneg; in hclge_set_phy_link_ksettings()
3330 hdev->hw.mac.speed = cmd->base.speed; in hclge_set_phy_link_ksettings()
3331 hdev->hw.mac.duplex = cmd->base.duplex; in hclge_set_phy_link_ksettings()
3332 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); in hclge_set_phy_link_ksettings()
3345 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_update_tp_port_info()
3349 hdev->hw.mac.autoneg = cmd.base.autoneg; in hclge_update_tp_port_info()
3350 hdev->hw.mac.speed = cmd.base.speed; in hclge_update_tp_port_info()
3351 hdev->hw.mac.duplex = cmd.base.duplex; in hclge_update_tp_port_info()
3352 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); in hclge_update_tp_port_info()
3364 cmd.base.autoneg = hdev->hw.mac.autoneg; in hclge_tp_port_init()
3365 cmd.base.speed = hdev->hw.mac.speed; in hclge_tp_port_init()
3366 cmd.base.duplex = hdev->hw.mac.duplex; in hclge_tp_port_init()
3367 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); in hclge_tp_port_init()
3369 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_tp_port_init()
3374 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_port_info()
3379 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) in hclge_update_port_info()
3383 if (!hdev->support_sfp_query) in hclge_update_port_info()
3386 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3387 speed = mac->speed; in hclge_update_port_info()
3394 if (ret == -EOPNOTSUPP) { in hclge_update_port_info()
3395 hdev->support_sfp_query = false; in hclge_update_port_info()
3401 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3402 if (mac->speed_type == QUERY_ACTIVE_SPEED) { in hclge_update_port_info()
3404 if (mac->speed != speed) in hclge_update_port_info()
3408 return hclge_cfg_mac_speed_dup(hdev, mac->speed, in hclge_update_port_info()
3409 HCLGE_MAC_FULL, mac->lane_num); in hclge_update_port_info()
3422 struct hclge_dev *hdev = vport->back; in hclge_get_status()
3426 return hdev->hw.mac.link; in hclge_get_status()
3431 if (!pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3432 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3437 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3438 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3440 vf, pci_num_vf(hdev->pdev)); in hclge_get_vf_vport()
3446 return &hdev->vport[vf]; in hclge_get_vf_vport()
3453 struct hclge_dev *hdev = vport->back; in hclge_get_vf_config()
3457 return -EINVAL; in hclge_get_vf_config()
3459 ivf->vf = vf; in hclge_get_vf_config()
3460 ivf->linkstate = vport->vf_info.link_state; in hclge_get_vf_config()
3461 ivf->spoofchk = vport->vf_info.spoofchk; in hclge_get_vf_config()
3462 ivf->trusted = vport->vf_info.trusted; in hclge_get_vf_config()
3463 ivf->min_tx_rate = 0; in hclge_get_vf_config()
3464 ivf->max_tx_rate = vport->vf_info.max_tx_rate; in hclge_get_vf_config()
3465 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; in hclge_get_vf_config()
3466 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); in hclge_get_vf_config()
3467 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; in hclge_get_vf_config()
3468 ether_addr_copy(ivf->mac, vport->vf_info.mac); in hclge_get_vf_config()
3477 struct hclge_dev *hdev = vport->back; in hclge_set_vf_link_state()
3483 return -EINVAL; in hclge_set_vf_link_state()
3485 link_state_old = vport->vf_info.link_state; in hclge_set_vf_link_state()
3486 vport->vf_info.link_state = link_state; in hclge_set_vf_link_state()
3491 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_set_vf_link_state()
3496 vport->vf_info.link_state = link_state_old; in hclge_set_vf_link_state()
3497 dev_err(&hdev->pdev->dev, in hclge_set_vf_link_state()
3509 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); in hclge_check_event_cause()
3510 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_check_event_cause()
3511 hw_err_src_reg = hclge_read_dev(&hdev->hw, in hclge_check_event_cause()
3523 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); in hclge_check_event_cause()
3524 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3525 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3527 hdev->rst_stats.imp_rst_cnt++; in hclge_check_event_cause()
3532 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); in hclge_check_event_cause()
3533 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3534 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3536 hdev->rst_stats.global_rst_cnt++; in hclge_check_event_cause()
3559 dev_info(&hdev->pdev->dev, in hclge_check_event_cause()
3577 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); in hclge_clear_event_cause()
3580 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); in hclge_clear_event_cause()
3598 writel(enable ? 1 : 0, vector->addr); in hclge_enable_vector()
3608 hclge_enable_vector(&hdev->misc_vector, false); in hclge_misc_irq_handle()
3620 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3622 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3637 dev_warn(&hdev->pdev->dev, in hclge_misc_irq_handle()
3648 hclge_enable_vector(&hdev->misc_vector, true); in hclge_misc_irq_handle()
3655 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { in hclge_free_vector()
3656 dev_warn(&hdev->pdev->dev, in hclge_free_vector()
3661 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; in hclge_free_vector()
3662 hdev->num_msi_left += 1; in hclge_free_vector()
3663 hdev->num_msi_used -= 1; in hclge_free_vector()
3668 struct hclge_misc_vector *vector = &hdev->misc_vector; in hclge_get_misc_vector()
3670 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); in hclge_get_misc_vector()
3672 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; in hclge_get_misc_vector()
3673 hdev->vector_status[0] = 0; in hclge_get_misc_vector()
3675 hdev->num_msi_left -= 1; in hclge_get_misc_vector()
3676 hdev->num_msi_used += 1; in hclge_get_misc_vector()
3686 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", in hclge_misc_irq_init()
3687 HCLGE_NAME, pci_name(hdev->pdev)); in hclge_misc_irq_init()
3688 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, in hclge_misc_irq_init()
3689 0, hdev->misc_vector.name, hdev); in hclge_misc_irq_init()
3692 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", in hclge_misc_irq_init()
3693 hdev->misc_vector.vector_irq); in hclge_misc_irq_init()
3701 free_irq(hdev->misc_vector.vector_irq, hdev); in hclge_misc_irq_uninit()
3708 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_notify_client()
3709 struct hnae3_client *client = hdev->nic_client; in hclge_notify_client()
3712 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) in hclge_notify_client()
3715 if (!client->ops->reset_notify) in hclge_notify_client()
3716 return -EOPNOTSUPP; in hclge_notify_client()
3718 ret = client->ops->reset_notify(handle, type); in hclge_notify_client()
3720 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", in hclge_notify_client()
3729 struct hnae3_handle *handle = &hdev->vport[0].roce; in hclge_notify_roce_client()
3730 struct hnae3_client *client = hdev->roce_client; in hclge_notify_roce_client()
3733 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclge_notify_roce_client()
3736 if (!client->ops->reset_notify) in hclge_notify_roce_client()
3737 return -EOPNOTSUPP; in hclge_notify_roce_client()
3739 ret = client->ops->reset_notify(handle, type); in hclge_notify_roce_client()
3741 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", in hclge_notify_roce_client()
3755 switch (hdev->reset_type) { in hclge_reset_wait()
3769 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3771 hdev->reset_type); in hclge_reset_wait()
3772 return -EINVAL; in hclge_reset_wait()
3775 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3778 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3783 dev_warn(&hdev->pdev->dev, in hclge_reset_wait()
3784 "Wait for reset timeout: %d\n", hdev->reset_type); in hclge_reset_wait()
3785 return -EBUSY; in hclge_reset_wait()
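/*
 * Illustrative sketch (not from this driver): hclge_reset_wait() above
 * polls a reset status register until the pending bit clears or a retry
 * limit is hit, returning -EBUSY on timeout. A generic userspace version
 * of that pattern, with hypothetical reset_pending()/msleep() callbacks
 * and made-up limits, could look like this.
 */
#include <errno.h>
#include <stdbool.h>

#define WAIT_INTERVAL_MS	20
#define WAIT_MAX_RETRIES	100

static int wait_for_reset_done(bool (*reset_pending)(void),
			       void (*msleep)(unsigned int ms))
{
	unsigned int retries = 0;

	while (reset_pending() && retries++ < WAIT_MAX_RETRIES)
		msleep(WAIT_INTERVAL_MS);

	if (reset_pending())
		return -EBUSY;	/* hardware never reported completion */

	return 0;
}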
3798 req->dest_vfid = func_id; in hclge_set_vf_rst()
3801 req->vf_rst = 0x1; in hclge_set_vf_rst()
3803 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vf_rst()
3810 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { in hclge_set_all_vf_rst()
3811 struct hclge_vport *vport = &hdev->vport[i]; in hclge_set_all_vf_rst()
3815 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); in hclge_set_all_vf_rst()
3817 dev_err(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3819 vport->vport_id - HCLGE_VF_VPORT_START_NUM, in hclge_set_all_vf_rst()
3825 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) in hclge_set_all_vf_rst()
3828 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && in hclge_set_all_vf_rst()
3829 hdev->reset_type == HNAE3_FUNC_RESET) { in hclge_set_all_vf_rst()
3831 &vport->need_notify); in hclge_set_all_vf_rst()
3841 dev_warn(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3843 vport->vport_id - HCLGE_VF_VPORT_START_NUM, in hclge_set_all_vf_rst()
3852 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || in hclge_mailbox_service_task()
3853 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || in hclge_mailbox_service_task()
3854 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) in hclge_mailbox_service_task()
3857 if (time_is_before_jiffies(hdev->last_mbx_scheduled + in hclge_mailbox_service_task()
3859 dev_warn(&hdev->pdev->dev, in hclge_mailbox_service_task()
3861 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), in hclge_mailbox_service_task()
3866 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_mailbox_service_task()
3883 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_sync_vf()
3887 if (ret == -EOPNOTSUPP) { in hclge_func_reset_sync_vf()
3891 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", in hclge_func_reset_sync_vf()
3894 } else if (req->all_vf_ready) { in hclge_func_reset_sync_vf()
3901 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); in hclge_func_reset_sync_vf()
3907 struct hnae3_client *client = hdev->nic_client; in hclge_report_hw_error()
3909 if (!client || !client->ops->process_hw_error || in hclge_report_hw_error()
3910 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) in hclge_report_hw_error()
3913 client->ops->process_hw_error(&hdev->vport[0].nic, type); in hclge_report_hw_error()
3920 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_handle_imp_error()
3924 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
3930 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
3941 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); in hclge_func_reset_cmd()
3942 req->fun_reset_vfid = func_id; in hclge_func_reset_cmd()
3944 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_cmd()
3946 dev_err(&hdev->pdev->dev, in hclge_func_reset_cmd()
3954 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_do_reset()
3955 struct pci_dev *pdev = hdev->pdev; in hclge_do_reset()
3959 dev_info(&pdev->dev, "hardware reset not finished\n"); in hclge_do_reset()
3960 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", in hclge_do_reset()
3961 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), in hclge_do_reset()
3962 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); in hclge_do_reset()
3966 switch (hdev->reset_type) { in hclge_do_reset()
3968 dev_info(&pdev->dev, "IMP reset requested\n"); in hclge_do_reset()
3969 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_do_reset()
3971 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); in hclge_do_reset()
3974 dev_info(&pdev->dev, "global reset requested\n"); in hclge_do_reset()
3975 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); in hclge_do_reset()
3977 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); in hclge_do_reset()
3980 dev_info(&pdev->dev, "PF reset requested\n"); in hclge_do_reset()
3982 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); in hclge_do_reset()
3986 dev_warn(&pdev->dev, in hclge_do_reset()
3987 "unsupported reset type: %d\n", hdev->reset_type); in hclge_do_reset()
3996 struct hclge_dev *hdev = ae_dev->priv; in hclge_get_reset_level()
4016 if (hdev->reset_type != HNAE3_NONE_RESET && in hclge_get_reset_level()
4017 rst_level < hdev->reset_type) in hclge_get_reset_level()
4027 switch (hdev->reset_type) { in hclge_clear_reset_cause()
4044 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_clear_reset_cause()
4045 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, in hclge_clear_reset_cause()
4048 hclge_enable_vector(&hdev->misc_vector, true); in hclge_clear_reset_cause()
4055 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); in hclge_reset_handshake()
4061 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); in hclge_reset_handshake()
4082 switch (hdev->reset_type) { in hclge_reset_prepare_wait()
4090 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_wait()
4100 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_wait()
4101 hdev->rst_stats.pf_rst_cnt++; in hclge_reset_prepare_wait()
4110 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_reset_prepare_wait()
4111 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, in hclge_reset_prepare_wait()
4121 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); in hclge_reset_prepare_wait()
4136 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); in hclge_show_rst_info()
4145 if (hdev->reset_pending) { in hclge_reset_err_handle()
4146 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", in hclge_reset_err_handle()
4147 hdev->reset_pending); in hclge_reset_err_handle()
4149 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & in hclge_reset_err_handle()
4151 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4155 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { in hclge_reset_err_handle()
4156 hdev->rst_stats.reset_fail_cnt++; in hclge_reset_err_handle()
4157 set_bit(hdev->reset_type, &hdev->reset_pending); in hclge_reset_err_handle()
4158 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4159 "re-schedule reset task(%u)\n", in hclge_reset_err_handle()
4160 hdev->rst_stats.reset_fail_cnt); in hclge_reset_err_handle()
4169 dev_err(&hdev->pdev->dev, "Reset fail!\n"); in hclge_reset_err_handle()
4173 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_err_handle()
4180 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_reset_level()
4187 hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_update_reset_level()
4194 &hdev->default_reset_request); in hclge_update_reset_level()
4196 set_bit(reset_level, &hdev->reset_request); in hclge_update_reset_level()
4207 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; in hclge_set_rst_done()
4209 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rst_done()
4214 if (ret == -EOPNOTSUPP) { in hclge_set_rst_done()
4215 dev_warn(&hdev->pdev->dev, in hclge_set_rst_done()
4220 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", in hclge_set_rst_done()
4231 switch (hdev->reset_type) { in hclge_reset_prepare_up()
4244 /* clear the handshake status after re-initialization is done */ in hclge_reset_prepare_up()
4258 ret = hclge_reset_ae_dev(hdev->ae_dev); in hclge_reset_stack()
4269 hdev->rst_stats.reset_cnt++; in hclge_reset_prepare()
4288 hdev->rst_stats.hw_reset_done_cnt++; in hclge_reset_rebuild()
4303 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 in hclge_reset_rebuild()
4307 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) in hclge_reset_rebuild()
4324 hdev->last_reset_time = jiffies; in hclge_reset_rebuild()
4325 hdev->rst_stats.reset_fail_cnt = 0; in hclge_reset_rebuild()
4326 hdev->rst_stats.reset_done_cnt++; in hclge_reset_rebuild()
4327 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_rebuild()
4355 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_event()
4372 if (time_before(jiffies, (hdev->last_reset_time + in hclge_reset_event()
4374 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_reset_event()
4378 if (hdev->default_reset_request) { in hclge_reset_event()
4379 hdev->reset_level = in hclge_reset_event()
4381 &hdev->default_reset_request); in hclge_reset_event()
4382 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { in hclge_reset_event()
4383 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_reset_event()
4386 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", in hclge_reset_event()
4387 hdev->reset_level); in hclge_reset_event()
4390 set_bit(hdev->reset_level, &hdev->reset_request); in hclge_reset_event()
4393 if (hdev->reset_level < HNAE3_GLOBAL_RESET) in hclge_reset_event()
4394 hdev->reset_level++; in hclge_reset_event()
4400 struct hclge_dev *hdev = ae_dev->priv; in hclge_set_def_reset_request()
4402 set_bit(rst_type, &hdev->default_reset_request); in hclge_set_def_reset_request()
4412 if (!hdev->default_reset_request) in hclge_reset_timer()
4415 dev_info(&hdev->pdev->dev, in hclge_reset_timer()
4417 hclge_reset_event(hdev->pdev, NULL); in hclge_reset_timer()
4422 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_subtask()
4430 * b. else, we can come back later to check this status so re-sched in hclge_reset_subtask()
4433 hdev->last_reset_time = jiffies; in hclge_reset_subtask()
4434 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); in hclge_reset_subtask()
4435 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4439 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_reset_subtask()
4440 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4443 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_subtask()
4448 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_reset_request()
4451 if (ae_dev->hw_err_reset_req) { in hclge_handle_err_reset_request()
4453 &ae_dev->hw_err_reset_req); in hclge_handle_err_reset_request()
4457 if (hdev->default_reset_request && ae_dev->ops->reset_event) in hclge_handle_err_reset_request()
4458 ae_dev->ops->reset_event(hdev->pdev, NULL); in hclge_handle_err_reset_request()
4461 hclge_enable_vector(&hdev->misc_vector, true); in hclge_handle_err_reset_request()
4466 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_recovery()
4468 ae_dev->hw_err_reset_req = 0; in hclge_handle_err_recovery()
4481 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_misc_err_recovery()
4482 struct device *dev = &hdev->pdev->dev; in hclge_misc_err_recovery()
4485 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_misc_err_recovery()
4488 (hdev, &hdev->default_reset_request)) in hclge_misc_err_recovery()
4500 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_service_task()
4511 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_service_task()
4514 if (time_is_before_jiffies(hdev->last_rst_scheduled + in hclge_reset_service_task()
4516 dev_warn(&hdev->pdev->dev, in hclge_reset_service_task()
4518 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), in hclge_reset_service_task()
4521 down(&hdev->reset_sem); in hclge_reset_service_task()
4522 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4526 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4527 up(&hdev->reset_sem); in hclge_reset_service_task()
4538 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_update_vport_alive()
4539 struct hclge_vport *vport = &hdev->vport[i]; in hclge_update_vport_alive()
4541 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || in hclge_update_vport_alive()
4542 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_update_vport_alive()
4544 if (time_after(jiffies, vport->last_active_jiffies + in hclge_update_vport_alive()
4546 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_update_vport_alive()
4547 dev_warn(&hdev->pdev->dev, in hclge_update_vport_alive()
4549 i - HCLGE_VF_VPORT_START_NUM); in hclge_update_vport_alive()
4558 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_periodic_service_task()
4569 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { in hclge_periodic_service_task()
4570 delta = jiffies - hdev->last_serv_processed; in hclge_periodic_service_task()
4573 delta = round_jiffies_relative(HZ) - delta; in hclge_periodic_service_task()
4578 hdev->serv_processed_cnt++; in hclge_periodic_service_task()
4581 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { in hclge_periodic_service_task()
4582 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4586 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) in hclge_periodic_service_task()
4592 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) in hclge_periodic_service_task()
4595 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
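/*
 * Illustrative sketch (not from this driver): the periodic service task
 * above re-arms itself so it runs roughly once per second; if it was
 * woken early it only waits for the remaining part of the interval. The
 * same arithmetic in plain C, using milliseconds instead of jiffies and
 * a caller-supplied current time (names are hypothetical).
 */
#define SERVICE_INTERVAL_MS	1000UL

static unsigned long next_delay_ms(unsigned long now_ms,
				   unsigned long last_processed_ms)
{
	unsigned long elapsed = now_ms - last_processed_ms;

	/* woken early: wait only for the rest of the interval */
	if (elapsed < SERVICE_INTERVAL_MS)
		return SERVICE_INTERVAL_MS - elapsed;

	return 0;	/* overdue: run immediately */
}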
4605 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || in hclge_ptp_service_task()
4606 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || in hclge_ptp_service_task()
4607 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) in hclge_ptp_service_task()
4611 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4616 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) in hclge_ptp_service_task()
4619 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4645 if (!handle->client) in hclge_get_vport()
4647 else if (handle->client->type == HNAE3_CLIENT_ROCE) in hclge_get_vport()
4658 vector_info->vector = pci_irq_vector(hdev->pdev, idx); in hclge_get_vector_info()
4661 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) in hclge_get_vector_info()
4662 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4664 (idx - 1) * HCLGE_VECTOR_REG_OFFSET; in hclge_get_vector_info()
4666 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4668 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * in hclge_get_vector_info()
4670 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * in hclge_get_vector_info()
4673 hdev->vector_status[idx] = hdev->vport[0].vport_id; in hclge_get_vector_info()
4674 hdev->vector_irq[idx] = vector_info->vector; in hclge_get_vector_info()
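/*
 * Illustrative sketch (not from this driver): hclge_get_vector_info()
 * above derives the per-vector interrupt-control register address from
 * the vector index (offset by one because vector 0 is the misc vector),
 * switching to a second register bank once the index exceeds the first
 * bank's capacity. The constants below are made up; only the shape of
 * the address arithmetic mirrors the code above.
 */
#include <stdint.h>

#define BANK0_BASE	0x100U	/* hypothetical offsets */
#define BANK1_BASE	0x1000U
#define BANK0_VECTORS	64U
#define VECTOR_STRIDE	0x4U
#define BANK1_GROUP_SZ	0x200U

static uint32_t vector_reg_offset(uint32_t idx)
{
	if (idx < BANK0_VECTORS)
		return BANK0_BASE + idx * VECTOR_STRIDE;

	return BANK1_BASE +
	       idx / BANK0_VECTORS * BANK1_GROUP_SZ +
	       idx % BANK0_VECTORS * VECTOR_STRIDE;
}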
4682 struct hclge_dev *hdev = vport->back; in hclge_get_vector()
4687 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); in hclge_get_vector()
4688 vector_num = min(hdev->num_msi_left, vector_num); in hclge_get_vector()
4691 while (++i < hdev->num_nic_msi) { in hclge_get_vector()
4692 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { in hclge_get_vector()
4701 hdev->num_msi_left -= alloc; in hclge_get_vector()
4702 hdev->num_msi_used += alloc; in hclge_get_vector()
4711 for (i = 0; i < hdev->num_msi; i++) in hclge_get_vector_index()
4712 if (vector == hdev->vector_irq[i]) in hclge_get_vector_index()
4715 return -EINVAL; in hclge_get_vector_index()
4721 struct hclge_dev *hdev = vport->back; in hclge_put_vector()
4726 dev_err(&hdev->pdev->dev, in hclge_put_vector()
4739 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_get_rss()
4741 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; in hclge_get_rss()
4746 ae_dev->dev_specs.rss_ind_tbl_size); in hclge_get_rss()
4754 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_set_rss()
4756 struct hclge_dev *hdev = vport->back; in hclge_set_rss()
4757 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclge_set_rss()
4760 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); in hclge_set_rss()
4762 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); in hclge_set_rss()
4767 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) in hclge_set_rss()
4768 rss_cfg->rss_indirection_tbl[i] = indir[i]; in hclge_set_rss()
4771 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, in hclge_set_rss()
4772 rss_cfg->rss_indirection_tbl); in hclge_set_rss()
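/*
 * Illustrative sketch (not from this driver): hclge_set_rss() above
 * copies a caller-supplied RSS indirection table before programming it.
 * A common way to build such a table in the first place is to spread the
 * entries evenly over the active queues; a minimal helper for that, with
 * hypothetical names, is shown below.
 */
#include <stdint.h>
#include <stddef.h>

static void rss_fill_default_indir(uint16_t *tbl, size_t tbl_size,
				   uint16_t num_queues)
{
	size_t i;

	if (!num_queues)
		return;

	/* entry i hashes to queue (i mod num_queues) */
	for (i = 0; i < tbl_size; i++)
		tbl[i] = i % num_queues;
}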
4779 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tuple()
4782 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, in hclge_set_rss_tuple()
4783 &hdev->rss_cfg, nfc); in hclge_set_rss_tuple()
4785 dev_err(&hdev->pdev->dev, in hclge_set_rss_tuple()
4800 nfc->data = 0; in hclge_get_rss_tuple()
4802 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, in hclge_get_rss_tuple()
4807 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); in hclge_get_rss_tuple()
4815 struct hclge_dev *hdev = vport->back; in hclge_get_tc_size()
4817 return hdev->pf_rss_size_max; in hclge_get_tc_size()
4822 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_init_rss_tc_mode()
4823 struct hclge_vport *vport = hdev->vport; in hclge_init_rss_tc_mode()
4832 tc_info = &vport->nic.kinfo.tc_info; in hclge_init_rss_tc_mode()
4834 rss_size = tc_info->tqp_count[i]; in hclge_init_rss_tc_mode()
4837 if (!(hdev->hw_tc_map & BIT(i))) in hclge_init_rss_tc_mode()
4844 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || in hclge_init_rss_tc_mode()
4846 dev_err(&hdev->pdev->dev, in hclge_init_rss_tc_mode()
4849 return -EINVAL; in hclge_init_rss_tc_mode()
4857 tc_offset[i] = tc_info->tqp_offset[i]; in hclge_init_rss_tc_mode()
4860 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_init_rss_tc_mode()
4866 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; in hclge_rss_init_hw()
4867 u8 *key = hdev->rss_cfg.rss_hash_key; in hclge_rss_init_hw()
4868 u8 hfunc = hdev->rss_cfg.rss_algo; in hclge_rss_init_hw()
4871 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, in hclge_rss_init_hw()
4876 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); in hclge_rss_init_hw()
4880 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); in hclge_rss_init_hw()
4891 struct hclge_dev *hdev = vport->back; in hclge_bind_ring_with_vector()
4903 req->int_vector_id_l = hnae3_get_field(vector_id, in hclge_bind_ring_with_vector()
4906 req->int_vector_id_h = hnae3_get_field(vector_id, in hclge_bind_ring_with_vector()
4911 for (node = ring_chain; node; node = node->next) { in hclge_bind_ring_with_vector()
4912 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); in hclge_bind_ring_with_vector()
4915 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); in hclge_bind_ring_with_vector()
4917 HCLGE_TQP_ID_S, node->tqp_index); in hclge_bind_ring_with_vector()
4920 hnae3_get_field(node->int_gl_idx, in hclge_bind_ring_with_vector()
4923 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); in hclge_bind_ring_with_vector()
4925 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; in hclge_bind_ring_with_vector()
4926 req->vfid = vport->vport_id; in hclge_bind_ring_with_vector()
4928 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
4930 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
4933 return -EIO; in hclge_bind_ring_with_vector()
4940 req->int_vector_id_l = in hclge_bind_ring_with_vector()
4944 req->int_vector_id_h = in hclge_bind_ring_with_vector()
4952 req->int_cause_num = i; in hclge_bind_ring_with_vector()
4953 req->vfid = vport->vport_id; in hclge_bind_ring_with_vector()
4954 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
4956 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
4958 return -EIO; in hclge_bind_ring_with_vector()
4969 struct hclge_dev *hdev = vport->back; in hclge_map_ring_to_vector()
4974 dev_err(&hdev->pdev->dev, in hclge_map_ring_to_vector()
4986 struct hclge_dev *hdev = vport->back; in hclge_unmap_ring_frm_vector()
4989 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_unmap_ring_frm_vector()
4994 dev_err(&handle->pdev->dev, in hclge_unmap_ring_frm_vector()
5001 dev_err(&handle->pdev->dev, in hclge_unmap_ring_frm_vector()
5011 struct hclge_vport *vport = &hdev->vport[vf_id]; in hclge_cmd_set_promisc_mode()
5012 struct hnae3_handle *handle = &vport->nic; in hclge_cmd_set_promisc_mode()
5022 req->vf_id = vf_id; in hclge_cmd_set_promisc_mode()
5024 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) in hclge_cmd_set_promisc_mode()
5033 req->extend_promisc = promisc_cfg; in hclge_cmd_set_promisc_mode()
5042 req->promisc = promisc_cfg; in hclge_cmd_set_promisc_mode()
5044 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_set_promisc_mode()
5046 dev_err(&hdev->pdev->dev, in hclge_cmd_set_promisc_mode()
5056 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, in hclge_set_vport_promisc_mode()
5064 struct hclge_dev *hdev = vport->back; in hclge_set_promisc_mode()
5071 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_promisc_mode()
5072 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; in hclge_set_promisc_mode()
5082 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_request_update_promisc_mode()
5087 if (hlist_empty(&hdev->fd_rule_list)) in hclge_sync_fd_state()
5088 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_sync_fd_state()
5093 if (!test_bit(location, hdev->fd_bmap)) { in hclge_fd_inc_rule_cnt()
5094 set_bit(location, hdev->fd_bmap); in hclge_fd_inc_rule_cnt()
5095 hdev->hclge_fd_rule_num++; in hclge_fd_inc_rule_cnt()
5101 if (test_bit(location, hdev->fd_bmap)) { in hclge_fd_dec_rule_cnt()
5102 clear_bit(location, hdev->fd_bmap); in hclge_fd_dec_rule_cnt()
5103 hdev->hclge_fd_rule_num--; in hclge_fd_dec_rule_cnt()
5110 hlist_del(&rule->rule_node); in hclge_fd_free_node()
5132 new_rule->rule_node.next = old_rule->rule_node.next; in hclge_update_fd_rule_node()
5133 new_rule->rule_node.pprev = old_rule->rule_node.pprev; in hclge_update_fd_rule_node()
5138 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5153 if (old_rule->state == HCLGE_FD_TO_ADD) { in hclge_update_fd_rule_node()
5154 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5158 old_rule->state = HCLGE_FD_TO_DEL; in hclge_update_fd_rule_node()
5171 if (rule->location == location) in hclge_find_fd_rule()
5173 else if (rule->location > location) in hclge_find_fd_rule()
5184 /* insert fd rule node in ascending order according to rule->location */
5189 INIT_HLIST_NODE(&rule->rule_node); in hclge_fd_insert_rule_node()
5192 hlist_add_behind(&rule->rule_node, &parent->rule_node); in hclge_fd_insert_rule_node()
5194 hlist_add_head(&rule->rule_node, hlist); in hclge_fd_insert_rule_node()
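/*
 * Illustrative sketch (not from this driver): the two helpers above keep
 * the flow-director rule list sorted by location, first locating the
 * predecessor and then linking behind it (or at the head when there is
 * none). The same idea on a plain singly linked list, with hypothetical
 * types, looks like this.
 */
#include <stddef.h>

struct fd_node {
	unsigned int location;
	struct fd_node *next;
};

static void fd_insert_sorted(struct fd_node **head, struct fd_node *node)
{
	struct fd_node *parent = NULL, *cur;

	/* find the last node whose location is smaller than ours */
	for (cur = *head; cur && cur->location < node->location; cur = cur->next)
		parent = cur;

	if (parent) {
		node->next = parent->next;
		parent->next = node;
	} else {
		node->next = *head;
		*head = node;
	}
}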
5212 req->ol2_cfg = cpu_to_le16(data); in hclge_fd_set_user_def_cmd()
5218 req->ol3_cfg = cpu_to_le16(data); in hclge_fd_set_user_def_cmd()
5224 req->ol4_cfg = cpu_to_le16(data); in hclge_fd_set_user_def_cmd()
5226 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_set_user_def_cmd()
5228 dev_err(&hdev->pdev->dev, in hclge_fd_set_user_def_cmd()
5237 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) in hclge_sync_fd_user_def_cfg()
5241 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5243 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); in hclge_sync_fd_user_def_cfg()
5245 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_sync_fd_user_def_cfg()
5248 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5254 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_fd_check_user_def_refcnt()
5259 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || in hclge_fd_check_user_def_refcnt()
5260 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) in hclge_fd_check_user_def_refcnt()
5264 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_check_user_def_refcnt()
5265 info = &rule->ep.user_def; in hclge_fd_check_user_def_refcnt()
5267 if (!cfg->ref_cnt || cfg->offset == info->offset) in hclge_fd_check_user_def_refcnt()
5270 if (cfg->ref_cnt > 1) in hclge_fd_check_user_def_refcnt()
5273 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); in hclge_fd_check_user_def_refcnt()
5275 old_info = &fd_rule->ep.user_def; in hclge_fd_check_user_def_refcnt()
5276 if (info->layer == old_info->layer) in hclge_fd_check_user_def_refcnt()
5281 dev_err(&hdev->pdev->dev, in hclge_fd_check_user_def_refcnt()
5283 info->layer + 1); in hclge_fd_check_user_def_refcnt()
5284 return -ENOSPC; in hclge_fd_check_user_def_refcnt()
5292 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || in hclge_fd_inc_user_def_refcnt()
5293 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) in hclge_fd_inc_user_def_refcnt()
5296 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_inc_user_def_refcnt()
5297 if (!cfg->ref_cnt) { in hclge_fd_inc_user_def_refcnt()
5298 cfg->offset = rule->ep.user_def.offset; in hclge_fd_inc_user_def_refcnt()
5299 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_inc_user_def_refcnt()
5301 cfg->ref_cnt++; in hclge_fd_inc_user_def_refcnt()
5309 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || in hclge_fd_dec_user_def_refcnt()
5310 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) in hclge_fd_dec_user_def_refcnt()
5313 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_dec_user_def_refcnt()
5314 if (!cfg->ref_cnt) in hclge_fd_dec_user_def_refcnt()
5317 cfg->ref_cnt--; in hclge_fd_dec_user_def_refcnt()
5318 if (!cfg->ref_cnt) { in hclge_fd_dec_user_def_refcnt()
5319 cfg->offset = 0; in hclge_fd_dec_user_def_refcnt()
5320 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_dec_user_def_refcnt()
5328 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_update_fd_list()
5346 dev_warn(&hdev->pdev->dev, in hclge_update_fd_list()
5356 hclge_fd_inc_rule_cnt(hdev, new_rule->location); in hclge_update_fd_list()
5359 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_update_fd_list()
5374 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_mode()
5376 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); in hclge_get_fd_mode()
5380 *fd_mode = req->mode; in hclge_get_fd_mode()
5399 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_allocation()
5401 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", in hclge_get_fd_allocation()
5406 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); in hclge_get_fd_allocation()
5407 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); in hclge_get_fd_allocation()
5408 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); in hclge_get_fd_allocation()
5409 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); in hclge_get_fd_allocation()
5425 stage = &hdev->fd_cfg.key_cfg[stage_num]; in hclge_set_fd_key_config()
5426 req->stage = stage_num; in hclge_set_fd_key_config()
5427 req->key_select = stage->key_sel; in hclge_set_fd_key_config()
5428 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; in hclge_set_fd_key_config()
5429 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; in hclge_set_fd_key_config()
5430 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; in hclge_set_fd_key_config()
5431 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; in hclge_set_fd_key_config()
5432 req->tuple_mask = cpu_to_le32(~stage->tuple_active); in hclge_set_fd_key_config()
5433 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); in hclge_set_fd_key_config()
5435 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fd_key_config()
5437 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); in hclge_set_fd_key_config()
5444 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; in hclge_fd_disable_user_def()
5446 spin_lock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5447 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); in hclge_fd_disable_user_def()
5448 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5459 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_init_fd_config()
5462 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5466 switch (hdev->fd_cfg.fd_mode) { in hclge_init_fd_config()
5468 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; in hclge_init_fd_config()
5471 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; in hclge_init_fd_config()
5474 dev_err(&hdev->pdev->dev, in hclge_init_fd_config()
5476 hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5477 return -EOPNOTSUPP; in hclge_init_fd_config()
5480 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; in hclge_init_fd_config()
5481 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; in hclge_init_fd_config()
5482 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; in hclge_init_fd_config()
5483 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; in hclge_init_fd_config()
5484 key_cfg->outer_sipv6_word_en = 0; in hclge_init_fd_config()
5485 key_cfg->outer_dipv6_word_en = 0; in hclge_init_fd_config()
5487 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | in hclge_init_fd_config()
5493 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { in hclge_init_fd_config()
5494 key_cfg->tuple_active |= in hclge_init_fd_config()
5496 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hclge_init_fd_config()
5497 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; in hclge_init_fd_config()
5503 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); in hclge_init_fd_config()
5506 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5507 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], in hclge_init_fd_config()
5508 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5509 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); in hclge_init_fd_config()
5535 req1->stage = stage; in hclge_fd_tcam_config()
5536 req1->xy_sel = sel_x ? 1 : 0; in hclge_fd_tcam_config()
5537 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); in hclge_fd_tcam_config()
5538 req1->index = cpu_to_le32(loc); in hclge_fd_tcam_config()
5539 req1->entry_vld = sel_x ? is_add : 0; in hclge_fd_tcam_config()
5542 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); in hclge_fd_tcam_config()
5543 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], in hclge_fd_tcam_config()
5544 sizeof(req2->tcam_data)); in hclge_fd_tcam_config()
5545 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + in hclge_fd_tcam_config()
5546 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); in hclge_fd_tcam_config()
5549 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_fd_tcam_config()
5551 dev_err(&hdev->pdev->dev, in hclge_fd_tcam_config()
5561 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_fd_ad_config()
5570 req->index = cpu_to_le32(loc); in hclge_fd_ad_config()
5571 req->stage = stage; in hclge_fd_ad_config()
5574 action->write_rule_id_to_bd); in hclge_fd_ad_config()
5576 action->rule_id); in hclge_fd_ad_config()
5577 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { in hclge_fd_ad_config()
5579 action->override_tc); in hclge_fd_ad_config()
5581 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); in hclge_fd_ad_config()
5584 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); in hclge_fd_ad_config()
5586 action->forward_to_direct_queue); in hclge_fd_ad_config()
5588 action->queue_id); in hclge_fd_ad_config()
5589 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); in hclge_fd_ad_config()
5591 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); in hclge_fd_ad_config()
5592 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); in hclge_fd_ad_config()
5594 action->counter_id); in hclge_fd_ad_config()
5596 req->ad_data = cpu_to_le64(ad_data); in hclge_fd_ad_config()
5597 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_ad_config()
5599 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); in hclge_fd_ad_config()
5614 if (rule->unused_tuple & BIT(tuple_bit)) in hclge_fd_convert_tuple()
5643 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], in hclge_fd_convert_tuple()
5645 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], in hclge_fd_convert_tuple()
5695 tuple_bit = key_cfg->meta_data_active & BIT(i); in hclge_fd_convert_meta_data()
5704 rule->vf_id, 0); in hclge_fd_convert_meta_data()
5717 shift_bits = sizeof(meta_data) * 8 - cur_pos; in hclge_fd_convert_meta_data()
5730 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; in hclge_config_key()
5747 if (!(key_cfg->tuple_active & BIT(i))) in hclge_config_key()
5758 meta_data_region = hdev->fd_cfg.max_key_length / 8 - in hclge_config_key()
5766 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, in hclge_config_key()
5769 dev_err(&hdev->pdev->dev, in hclge_config_key()
5771 rule->queue_id, ret); in hclge_config_key()
5775 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, in hclge_config_key()
5778 dev_err(&hdev->pdev->dev, in hclge_config_key()
5780 rule->queue_id, ret); in hclge_config_key()
5787 struct hclge_vport *vport = hdev->vport; in hclge_config_action()
5788 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_config_action()
5792 ad_data.ad_id = rule->location; in hclge_config_action()
5794 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { in hclge_config_action()
5796 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { in hclge_config_action()
5799 kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; in hclge_config_action()
5801 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); in hclge_config_action()
5804 ad_data.queue_id = rule->queue_id; in hclge_config_action()
5807 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { in hclge_config_action()
5809 ad_data.counter_id = rule->vf_id % in hclge_config_action()
5810 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; in hclge_config_action()
5820 ad_data.rule_id = rule->location; in hclge_config_action()
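/*
 * Illustrative sketch (not from this driver): for the "select TC" action
 * above, the destination is encoded as the TC's first queue plus the
 * log2 of its queue count, which assumes each TC owns a contiguous,
 * power-of-two sized queue range. A small helper showing that encoding,
 * with hypothetical types and names:
 */
#include <stdint.h>

struct tc_queue_range {
	uint16_t tqp_offset;	/* first queue of the TC */
	uint16_t tqp_count;	/* number of queues, power of two */
};

static void encode_tc_action(const struct tc_queue_range *tc,
			     uint16_t *queue_id, uint8_t *tc_size)
{
	uint16_t n = tc->tqp_count;
	uint8_t log2n = 0;

	while (n > 1) {		/* integer log2 of the queue count */
		n >>= 1;
		log2n++;
	}

	*queue_id = tc->tqp_offset;
	*tc_size = log2n;
}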
5829 return -EINVAL; in hclge_fd_check_tcpip4_tuple()
5833 if (!spec->ip4src) in hclge_fd_check_tcpip4_tuple()
5836 if (!spec->ip4dst) in hclge_fd_check_tcpip4_tuple()
5839 if (!spec->psrc) in hclge_fd_check_tcpip4_tuple()
5842 if (!spec->pdst) in hclge_fd_check_tcpip4_tuple()
5845 if (!spec->tos) in hclge_fd_check_tcpip4_tuple()
5855 return -EINVAL; in hclge_fd_check_ip4_tuple()
5860 if (!spec->ip4src) in hclge_fd_check_ip4_tuple()
5863 if (!spec->ip4dst) in hclge_fd_check_ip4_tuple()
5866 if (!spec->tos) in hclge_fd_check_ip4_tuple()
5869 if (!spec->proto) in hclge_fd_check_ip4_tuple()
5872 if (spec->l4_4_bytes) in hclge_fd_check_ip4_tuple()
5873 return -EOPNOTSUPP; in hclge_fd_check_ip4_tuple()
5875 if (spec->ip_ver != ETH_RX_NFC_IP4) in hclge_fd_check_ip4_tuple()
5876 return -EOPNOTSUPP; in hclge_fd_check_ip4_tuple()
5885 return -EINVAL; in hclge_fd_check_tcpip6_tuple()
5890 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) in hclge_fd_check_tcpip6_tuple()
5893 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) in hclge_fd_check_tcpip6_tuple()
5896 if (!spec->psrc) in hclge_fd_check_tcpip6_tuple()
5899 if (!spec->pdst) in hclge_fd_check_tcpip6_tuple()
5902 if (!spec->tclass) in hclge_fd_check_tcpip6_tuple()
5912 return -EINVAL; in hclge_fd_check_ip6_tuple()
5918 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) in hclge_fd_check_ip6_tuple()
5921 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) in hclge_fd_check_ip6_tuple()
5924 if (!spec->l4_proto) in hclge_fd_check_ip6_tuple()
5927 if (!spec->tclass) in hclge_fd_check_ip6_tuple()
5930 if (spec->l4_4_bytes) in hclge_fd_check_ip6_tuple()
5931 return -EOPNOTSUPP; in hclge_fd_check_ip6_tuple()
5939 return -EINVAL; in hclge_fd_check_ether_tuple()
5945 if (is_zero_ether_addr(spec->h_source)) in hclge_fd_check_ether_tuple()
5948 if (is_zero_ether_addr(spec->h_dest)) in hclge_fd_check_ether_tuple()
5951 if (!spec->h_proto) in hclge_fd_check_ether_tuple()
5961 if (fs->flow_type & FLOW_EXT) { in hclge_fd_check_ext_tuple()
5962 if (fs->h_ext.vlan_etype) { in hclge_fd_check_ext_tuple()
5963 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); in hclge_fd_check_ext_tuple()
5964 return -EOPNOTSUPP; in hclge_fd_check_ext_tuple()
5967 if (!fs->h_ext.vlan_tci) in hclge_fd_check_ext_tuple()
5970 if (fs->m_ext.vlan_tci && in hclge_fd_check_ext_tuple()
5971 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { in hclge_fd_check_ext_tuple()
5972 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
5974 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); in hclge_fd_check_ext_tuple()
5975 return -EINVAL; in hclge_fd_check_ext_tuple()
5981 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_check_ext_tuple()
5982 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_ext_tuple()
5984 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
5986 return -EOPNOTSUPP; in hclge_fd_check_ext_tuple()
5989 if (is_zero_ether_addr(fs->h_ext.h_dest)) in hclge_fd_check_ext_tuple()
6003 info->layer = HCLGE_FD_USER_DEF_L2; in hclge_fd_get_user_def_layer()
6008 info->layer = HCLGE_FD_USER_DEF_L3; in hclge_fd_get_user_def_layer()
6015 info->layer = HCLGE_FD_USER_DEF_L4; in hclge_fd_get_user_def_layer()
6019 return -EOPNOTSUPP; in hclge_fd_get_user_def_layer()
6027 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; in hclge_fd_is_user_def_all_masked()
6035 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; in hclge_fd_parse_user_def_field()
6036 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_parse_user_def_field()
6040 info->layer = HCLGE_FD_USER_DEF_NONE; in hclge_fd_parse_user_def_field()
6043 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) in hclge_fd_parse_user_def_field()
6046 /* user-def data from ethtool is a 64-bit value, the bit0~15 is used in hclge_fd_parse_user_def_field()
6049 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; in hclge_fd_parse_user_def_field()
6050 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; in hclge_fd_parse_user_def_field()
6051 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; in hclge_fd_parse_user_def_field()
6052 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; in hclge_fd_parse_user_def_field()
6055 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); in hclge_fd_parse_user_def_field()
6056 return -EOPNOTSUPP; in hclge_fd_parse_user_def_field()
6060 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6061 "user-def offset[%u] should be no more than %u\n", in hclge_fd_parse_user_def_field()
6063 return -EINVAL; in hclge_fd_parse_user_def_field()
6067 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); in hclge_fd_parse_user_def_field()
6068 return -EINVAL; in hclge_fd_parse_user_def_field()
6073 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6074 "unsupported flow type for user-def bytes, ret = %d\n", in hclge_fd_parse_user_def_field()
6079 info->data = data; in hclge_fd_parse_user_def_field()
6080 info->data_mask = data_mask; in hclge_fd_parse_user_def_field()
6081 info->offset = offset; in hclge_fd_parse_user_def_field()
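/*
 * Illustrative sketch (not from this driver): as the code above shows,
 * the 64-bit ethtool user-def word arrives as two big-endian 32-bit
 * halves; the low half carries the match value and the high half the
 * byte offset, each masked down to the supported width. A standalone
 * decoder with hypothetical mask widths:
 */
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

#define USER_DEF_DATA_MASK	0xFFFFU	/* low 16 bits of the low word */
#define USER_DEF_OFFSET_MASK	0xFFFFU	/* low 16 bits of the high word */

struct user_def_field {
	uint16_t data;
	uint16_t offset;
};

static void decode_user_def(uint32_t be_hi, uint32_t be_lo,
			    struct user_def_field *out)
{
	out->data = ntohl(be_lo) & USER_DEF_DATA_MASK;
	out->offset = ntohl(be_hi) & USER_DEF_OFFSET_MASK;
}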
6094 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_fd_check_spec()
6095 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6097 fs->location, in hclge_fd_check_spec()
6098 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); in hclge_fd_check_spec()
6099 return -EINVAL; in hclge_fd_check_spec()
6106 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_check_spec()
6111 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, in hclge_fd_check_spec()
6115 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, in hclge_fd_check_spec()
6121 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, in hclge_fd_check_spec()
6125 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, in hclge_fd_check_spec()
6129 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_spec()
6131 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6133 return -EOPNOTSUPP; in hclge_fd_check_spec()
6136 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, in hclge_fd_check_spec()
6140 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6143 return -EOPNOTSUPP; in hclge_fd_check_spec()
6147 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6159 rule->tuples.src_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6160 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); in hclge_fd_get_tcpip4_tuple()
6161 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6162 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); in hclge_fd_get_tcpip4_tuple()
6164 rule->tuples.dst_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6165 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); in hclge_fd_get_tcpip4_tuple()
6166 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_fd_get_tcpip4_tuple()
6167 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); in hclge_fd_get_tcpip4_tuple()
6169 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); in hclge_fd_get_tcpip4_tuple()
6170 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); in hclge_fd_get_tcpip4_tuple()
6172 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); in hclge_fd_get_tcpip4_tuple()
6173 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); in hclge_fd_get_tcpip4_tuple()
6175 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; in hclge_fd_get_tcpip4_tuple()
6176 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; in hclge_fd_get_tcpip4_tuple()
6178 rule->tuples.ether_proto = ETH_P_IP; in hclge_fd_get_tcpip4_tuple()
6179 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tcpip4_tuple()
6181 rule->tuples.ip_proto = ip_proto; in hclge_fd_get_tcpip4_tuple()
6182 rule->tuples_mask.ip_proto = 0xFF; in hclge_fd_get_tcpip4_tuple()
6188 rule->tuples.src_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6189 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); in hclge_fd_get_ip4_tuple()
6190 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6191 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); in hclge_fd_get_ip4_tuple()
6193 rule->tuples.dst_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6194 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); in hclge_fd_get_ip4_tuple()
6195 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_fd_get_ip4_tuple()
6196 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); in hclge_fd_get_ip4_tuple()
6198 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; in hclge_fd_get_ip4_tuple()
6199 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; in hclge_fd_get_ip4_tuple()
6201 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; in hclge_fd_get_ip4_tuple()
6202 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; in hclge_fd_get_ip4_tuple()
6204 rule->tuples.ether_proto = ETH_P_IP; in hclge_fd_get_ip4_tuple()
6205 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_ip4_tuple()
6211 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, in hclge_fd_get_tcpip6_tuple()
6213 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, in hclge_fd_get_tcpip6_tuple()
6216 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, in hclge_fd_get_tcpip6_tuple()
6218 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, in hclge_fd_get_tcpip6_tuple()
6221 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); in hclge_fd_get_tcpip6_tuple()
6222 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); in hclge_fd_get_tcpip6_tuple()
6224 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); in hclge_fd_get_tcpip6_tuple()
6225 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); in hclge_fd_get_tcpip6_tuple()
6227 rule->tuples.ether_proto = ETH_P_IPV6; in hclge_fd_get_tcpip6_tuple()
6228 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tcpip6_tuple()
6230 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; in hclge_fd_get_tcpip6_tuple()
6231 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; in hclge_fd_get_tcpip6_tuple()
6233 rule->tuples.ip_proto = ip_proto; in hclge_fd_get_tcpip6_tuple()
6234 rule->tuples_mask.ip_proto = 0xFF; in hclge_fd_get_tcpip6_tuple()
6240 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, in hclge_fd_get_ip6_tuple()
6242 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, in hclge_fd_get_ip6_tuple()
6245 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, in hclge_fd_get_ip6_tuple()
6247 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, in hclge_fd_get_ip6_tuple()
6250 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; in hclge_fd_get_ip6_tuple()
6251 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; in hclge_fd_get_ip6_tuple()
6253 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; in hclge_fd_get_ip6_tuple()
6254 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; in hclge_fd_get_ip6_tuple()
6256 rule->tuples.ether_proto = ETH_P_IPV6; in hclge_fd_get_ip6_tuple()
6257 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_ip6_tuple()
6263 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); in hclge_fd_get_ether_tuple()
6264 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); in hclge_fd_get_ether_tuple()
6266 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); in hclge_fd_get_ether_tuple()
6267 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); in hclge_fd_get_ether_tuple()
6269 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); in hclge_fd_get_ether_tuple()
6270 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); in hclge_fd_get_ether_tuple()
6276 switch (info->layer) { in hclge_fd_get_user_def_tuple()
6278 rule->tuples.l2_user_def = info->data; in hclge_fd_get_user_def_tuple()
6279 rule->tuples_mask.l2_user_def = info->data_mask; in hclge_fd_get_user_def_tuple()
6282 rule->tuples.l3_user_def = info->data; in hclge_fd_get_user_def_tuple()
6283 rule->tuples_mask.l3_user_def = info->data_mask; in hclge_fd_get_user_def_tuple()
6286 rule->tuples.l4_user_def = (u32)info->data << 16; in hclge_fd_get_user_def_tuple()
6287 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; in hclge_fd_get_user_def_tuple()
6293 rule->ep.user_def = *info; in hclge_fd_get_user_def_tuple()
6300 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_get_tuple()
6331 return -EOPNOTSUPP; in hclge_fd_get_tuple()
6334 if (fs->flow_type & FLOW_EXT) { in hclge_fd_get_tuple()
6335 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); in hclge_fd_get_tuple()
6336 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); in hclge_fd_get_tuple()
6340 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_get_tuple()
6341 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); in hclge_fd_get_tuple()
6342 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); in hclge_fd_get_tuple()
6365 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6367 if (hdev->fd_active_type != rule->rule_type && in hclge_add_fd_entry_common()
6368 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_add_fd_entry_common()
6369 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { in hclge_add_fd_entry_common()
6370 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry_common()
6372 rule->rule_type, hdev->fd_active_type); in hclge_add_fd_entry_common()
6373 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6374 return -EINVAL; in hclge_add_fd_entry_common()
6389 rule->state = HCLGE_FD_ACTIVE; in hclge_add_fd_entry_common()
6390 hdev->fd_active_type = rule->rule_type; in hclge_add_fd_entry_common()
6391 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_common()
6394 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6401 struct hclge_dev *hdev = vport->back; in hclge_is_cls_flower_active()
6403 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; in hclge_is_cls_flower_active()
6409 struct hclge_vport *vport = hdev->vport; in hclge_fd_parse_ring_cookie()
6421 if (vf > hdev->num_req_vfs) { in hclge_fd_parse_ring_cookie()
6422 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6424 vf - 1U, hdev->num_req_vfs); in hclge_fd_parse_ring_cookie()
6425 return -EINVAL; in hclge_fd_parse_ring_cookie()
6428 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; in hclge_fd_parse_ring_cookie()
6429 tqps = hdev->vport[vf].nic.kinfo.num_tqps; in hclge_fd_parse_ring_cookie()
6432 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6434 ring, tqps - 1U); in hclge_fd_parse_ring_cookie()
6435 return -EINVAL; in hclge_fd_parse_ring_cookie()
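/*
 * Illustrative sketch (not from this driver): the ring cookie checked
 * above packs an optional VF number and a queue index into one 64-bit
 * value; the uapi <linux/ethtool.h> masks can be used to pull the two
 * parts back out. Minimal decoder (the wrapper names are hypothetical).
 */
#include <stdint.h>
#include <linux/ethtool.h>

static uint32_t ring_cookie_queue(uint64_t ring_cookie)
{
	return ring_cookie & ETHTOOL_RX_FLOW_SPEC_RING;
}

static uint8_t ring_cookie_vf(uint64_t ring_cookie)
{
	return (ring_cookie & ETHTOOL_RX_FLOW_SPEC_RING_VF) >>
	       ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
}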
6449 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry()
6458 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_fd_entry()
6459 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6461 return -EOPNOTSUPP; in hclge_add_fd_entry()
6464 if (!hdev->fd_en) { in hclge_add_fd_entry()
6465 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6467 return -EOPNOTSUPP; in hclge_add_fd_entry()
6470 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_add_fd_entry()
6476 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, in hclge_add_fd_entry()
6483 return -ENOMEM; in hclge_add_fd_entry()
6491 rule->flow_type = fs->flow_type; in hclge_add_fd_entry()
6492 rule->location = fs->location; in hclge_add_fd_entry()
6493 rule->unused_tuple = unused; in hclge_add_fd_entry()
6494 rule->vf_id = dst_vport_id; in hclge_add_fd_entry()
6495 rule->queue_id = q_index; in hclge_add_fd_entry()
6496 rule->action = action; in hclge_add_fd_entry()
6497 rule->rule_type = HCLGE_FD_EP_ACTIVE; in hclge_add_fd_entry()
6510 struct hclge_dev *hdev = vport->back; in hclge_del_fd_entry()
6514 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_fd_entry()
6515 return -EOPNOTSUPP; in hclge_del_fd_entry()
6517 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_del_fd_entry()
6519 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_fd_entry()
6520 return -EINVAL; in hclge_del_fd_entry()
6522 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6523 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_del_fd_entry()
6524 !test_bit(fs->location, hdev->fd_bmap)) { in hclge_del_fd_entry()
6525 dev_err(&hdev->pdev->dev, in hclge_del_fd_entry()
6526 "Delete fail, rule %u is inexistent\n", fs->location); in hclge_del_fd_entry()
6527 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6528 return -ENOENT; in hclge_del_fd_entry()
6531 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, in hclge_del_fd_entry()
6536 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); in hclge_del_fd_entry()
6539 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
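/* Deletion: the location is range-checked against the stage-1 rule
 * capacity and refused while tc-flower rules own the table or the
 * location bit is clear in fd_bmap (-ENOENT). The TCAM entry is then
 * invalidated through hclge_fd_tcam_config(..., true, ...) and the
 * software list updated with HCLGE_FD_DELETED, all under fd_rule_lock.
 */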
6550 spin_lock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6552 for_each_set_bit(location, hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6553 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_clear_fd_rules_in_list()
6558 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, in hclge_clear_fd_rules_in_list()
6560 hlist_del(&rule->rule_node); in hclge_clear_fd_rules_in_list()
6563 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_clear_fd_rules_in_list()
6564 hdev->hclge_fd_rule_num = 0; in hclge_clear_fd_rules_in_list()
6565 bitmap_zero(hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6566 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_clear_fd_rules_in_list()
6569 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6574 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_all_fd_entries()
6584 struct hclge_dev *hdev = vport->back; in hclge_restore_fd_entries()
6592 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_restore_fd_entries()
6596 if (!hdev->fd_en) in hclge_restore_fd_entries()
6599 spin_lock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6600 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_restore_fd_entries()
6601 if (rule->state == HCLGE_FD_ACTIVE) in hclge_restore_fd_entries()
6602 rule->state = HCLGE_FD_TO_ADD; in hclge_restore_fd_entries()
6604 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6605 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_restore_fd_entries()
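/* Restore after reset does not touch hardware directly: every
 * HCLGE_FD_ACTIVE rule is flipped back to HCLGE_FD_TO_ADD and
 * HCLGE_STATE_FD_TBL_CHANGED is set so the sync path
 * (hclge_sync_fd_list() further below) re-programs the TCAM later.
 */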
6614 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_cnt()
6616 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) in hclge_get_fd_rule_cnt()
6617 return -EOPNOTSUPP; in hclge_get_fd_rule_cnt()
6619 cmd->rule_cnt = hdev->hclge_fd_rule_num; in hclge_get_fd_rule_cnt()
6620 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_fd_rule_cnt()
6629 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6630 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? in hclge_fd_get_tcpip4_info()
6631 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6633 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6634 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? in hclge_fd_get_tcpip4_info()
6635 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6637 spec->psrc = cpu_to_be16(rule->tuples.src_port); in hclge_fd_get_tcpip4_info()
6638 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? in hclge_fd_get_tcpip4_info()
6639 0 : cpu_to_be16(rule->tuples_mask.src_port); in hclge_fd_get_tcpip4_info()
6641 spec->pdst = cpu_to_be16(rule->tuples.dst_port); in hclge_fd_get_tcpip4_info()
6642 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? in hclge_fd_get_tcpip4_info()
6643 0 : cpu_to_be16(rule->tuples_mask.dst_port); in hclge_fd_get_tcpip4_info()
6645 spec->tos = rule->tuples.ip_tos; in hclge_fd_get_tcpip4_info()
6646 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_tcpip4_info()
6647 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_tcpip4_info()
6654 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6655 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? in hclge_fd_get_ip4_info()
6656 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6658 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6659 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? in hclge_fd_get_ip4_info()
6660 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6662 spec->tos = rule->tuples.ip_tos; in hclge_fd_get_ip4_info()
6663 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_ip4_info()
6664 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_ip4_info()
6666 spec->proto = rule->tuples.ip_proto; in hclge_fd_get_ip4_info()
6667 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? in hclge_fd_get_ip4_info()
6668 0 : rule->tuples_mask.ip_proto; in hclge_fd_get_ip4_info()
6670 spec->ip_ver = ETH_RX_NFC_IP4; in hclge_fd_get_ip4_info()
6677 cpu_to_be32_array(spec->ip6src, in hclge_fd_get_tcpip6_info()
6678 rule->tuples.src_ip, IPV6_SIZE); in hclge_fd_get_tcpip6_info()
6679 cpu_to_be32_array(spec->ip6dst, in hclge_fd_get_tcpip6_info()
6680 rule->tuples.dst_ip, IPV6_SIZE); in hclge_fd_get_tcpip6_info()
6681 if (rule->unused_tuple & BIT(INNER_SRC_IP)) in hclge_fd_get_tcpip6_info()
6682 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); in hclge_fd_get_tcpip6_info()
6684 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, in hclge_fd_get_tcpip6_info()
6687 if (rule->unused_tuple & BIT(INNER_DST_IP)) in hclge_fd_get_tcpip6_info()
6688 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); in hclge_fd_get_tcpip6_info()
6690 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, in hclge_fd_get_tcpip6_info()
6693 spec->tclass = rule->tuples.ip_tos; in hclge_fd_get_tcpip6_info()
6694 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_tcpip6_info()
6695 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_tcpip6_info()
6697 spec->psrc = cpu_to_be16(rule->tuples.src_port); in hclge_fd_get_tcpip6_info()
6698 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? in hclge_fd_get_tcpip6_info()
6699 0 : cpu_to_be16(rule->tuples_mask.src_port); in hclge_fd_get_tcpip6_info()
6701 spec->pdst = cpu_to_be16(rule->tuples.dst_port); in hclge_fd_get_tcpip6_info()
6702 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? in hclge_fd_get_tcpip6_info()
6703 0 : cpu_to_be16(rule->tuples_mask.dst_port); in hclge_fd_get_tcpip6_info()
6710 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6711 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6712 if (rule->unused_tuple & BIT(INNER_SRC_IP)) in hclge_fd_get_ip6_info()
6713 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); in hclge_fd_get_ip6_info()
6715 cpu_to_be32_array(spec_mask->ip6src, in hclge_fd_get_ip6_info()
6716 rule->tuples_mask.src_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6718 if (rule->unused_tuple & BIT(INNER_DST_IP)) in hclge_fd_get_ip6_info()
6719 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); in hclge_fd_get_ip6_info()
6721 cpu_to_be32_array(spec_mask->ip6dst, in hclge_fd_get_ip6_info()
6722 rule->tuples_mask.dst_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6724 spec->tclass = rule->tuples.ip_tos; in hclge_fd_get_ip6_info()
6725 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_ip6_info()
6726 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_ip6_info()
6728 spec->l4_proto = rule->tuples.ip_proto; in hclge_fd_get_ip6_info()
6729 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? in hclge_fd_get_ip6_info()
6730 0 : rule->tuples_mask.ip_proto; in hclge_fd_get_ip6_info()
6737 ether_addr_copy(spec->h_source, rule->tuples.src_mac); in hclge_fd_get_ether_info()
6738 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); in hclge_fd_get_ether_info()
6740 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) in hclge_fd_get_ether_info()
6741 eth_zero_addr(spec_mask->h_source); in hclge_fd_get_ether_info()
6743 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); in hclge_fd_get_ether_info()
6745 if (rule->unused_tuple & BIT(INNER_DST_MAC)) in hclge_fd_get_ether_info()
6746 eth_zero_addr(spec_mask->h_dest); in hclge_fd_get_ether_info()
6748 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); in hclge_fd_get_ether_info()
6750 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); in hclge_fd_get_ether_info()
6751 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? in hclge_fd_get_ether_info()
6752 0 : cpu_to_be16(rule->tuples_mask.ether_proto); in hclge_fd_get_ether_info()
6758 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == in hclge_fd_get_user_def_info()
6760 fs->h_ext.data[0] = 0; in hclge_fd_get_user_def_info()
6761 fs->h_ext.data[1] = 0; in hclge_fd_get_user_def_info()
6762 fs->m_ext.data[0] = 0; in hclge_fd_get_user_def_info()
6763 fs->m_ext.data[1] = 0; in hclge_fd_get_user_def_info()
6765 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); in hclge_fd_get_user_def_info()
6766 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); in hclge_fd_get_user_def_info()
6767 fs->m_ext.data[0] = in hclge_fd_get_user_def_info()
6769 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); in hclge_fd_get_user_def_info()
6776 if (fs->flow_type & FLOW_EXT) { in hclge_fd_get_ext_info()
6777 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); in hclge_fd_get_ext_info()
6778 fs->m_ext.vlan_tci = in hclge_fd_get_ext_info()
6779 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? in hclge_fd_get_ext_info()
6780 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); in hclge_fd_get_ext_info()
6785 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_get_ext_info()
6786 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); in hclge_fd_get_ext_info()
6787 if (rule->unused_tuple & BIT(INNER_DST_MAC)) in hclge_fd_get_ext_info()
6788 eth_zero_addr(fs->m_u.ether_spec.h_dest); in hclge_fd_get_ext_info()
6790 ether_addr_copy(fs->m_u.ether_spec.h_dest, in hclge_fd_get_ext_info()
6791 rule->tuples_mask.dst_mac); in hclge_fd_get_ext_info()
6801 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_get_fd_rule()
6802 if (rule->location == location) in hclge_get_fd_rule()
6804 else if (rule->location > location) in hclge_get_fd_rule()
6814 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { in hclge_fd_get_ring_cookie()
6815 fs->ring_cookie = RX_CLS_FLOW_DISC; in hclge_fd_get_ring_cookie()
6819 fs->ring_cookie = rule->queue_id; in hclge_fd_get_ring_cookie()
6820 vf_id = rule->vf_id; in hclge_fd_get_ring_cookie()
6822 fs->ring_cookie |= vf_id; in hclge_fd_get_ring_cookie()
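/* Reconstructing the ring cookie for ethtool -u: a drop rule is reported
 * as RX_CLS_FLOW_DISC; otherwise the rule's queue id is returned with
 * vf_id OR-ed in (the omitted line presumably shifts vf_id into the
 * ETHTOOL_RX_FLOW_SPEC_RING_VF field first).
 */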
6831 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_info()
6834 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_fd_rule_info()
6835 return -EOPNOTSUPP; in hclge_get_fd_rule_info()
6837 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_get_fd_rule_info()
6839 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6841 rule = hclge_get_fd_rule(hdev, fs->location); in hclge_get_fd_rule_info()
6843 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6844 return -ENOENT; in hclge_get_fd_rule_info()
6847 fs->flow_type = rule->flow_type; in hclge_get_fd_rule_info()
6848 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { in hclge_get_fd_rule_info()
6852 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, in hclge_get_fd_rule_info()
6853 &fs->m_u.tcp_ip4_spec); in hclge_get_fd_rule_info()
6856 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, in hclge_get_fd_rule_info()
6857 &fs->m_u.usr_ip4_spec); in hclge_get_fd_rule_info()
6862 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, in hclge_get_fd_rule_info()
6863 &fs->m_u.tcp_ip6_spec); in hclge_get_fd_rule_info()
6866 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, in hclge_get_fd_rule_info()
6867 &fs->m_u.usr_ip6_spec); in hclge_get_fd_rule_info()
6874 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, in hclge_get_fd_rule_info()
6875 &fs->m_u.ether_spec); in hclge_get_fd_rule_info()
6883 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6892 struct hclge_dev *hdev = vport->back; in hclge_get_all_rules()
6897 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_all_rules()
6898 return -EOPNOTSUPP; in hclge_get_all_rules()
6900 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_all_rules()
6902 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6904 &hdev->fd_rule_list, rule_node) { in hclge_get_all_rules()
6905 if (cnt == cmd->rule_cnt) { in hclge_get_all_rules()
6906 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6907 return -EMSGSIZE; in hclge_get_all_rules()
6910 if (rule->state == HCLGE_FD_TO_DEL) in hclge_get_all_rules()
6913 rule_locs[cnt] = rule->location; in hclge_get_all_rules()
6917 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6919 cmd->rule_cnt = cnt; in hclge_get_all_rules()
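/* Rule enumeration (ETHTOOL_GRXCLSRLALL): locations are copied into
 * rule_locs[] under fd_rule_lock, rules already marked HCLGE_FD_TO_DEL
 * are skipped, and -EMSGSIZE is returned when the caller's rule_cnt is
 * smaller than the number of stored rules.
 */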
6927 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 in hclge_fd_get_flow_tuples()
6928 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 in hclge_fd_get_flow_tuples()
6930 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); in hclge_fd_get_flow_tuples()
6931 tuples->ip_proto = fkeys->basic.ip_proto; in hclge_fd_get_flow_tuples()
6932 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); in hclge_fd_get_flow_tuples()
6934 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { in hclge_fd_get_flow_tuples()
6935 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); in hclge_fd_get_flow_tuples()
6936 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); in hclge_fd_get_flow_tuples()
6941 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); in hclge_fd_get_flow_tuples()
6942 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); in hclge_fd_get_flow_tuples()
6955 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_fd_search_flow_keys()
6956 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) in hclge_fd_search_flow_keys()
6966 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | in hclge_fd_build_arfs_rule()
6969 rule->action = 0; in hclge_fd_build_arfs_rule()
6970 rule->vf_id = 0; in hclge_fd_build_arfs_rule()
6971 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; in hclge_fd_build_arfs_rule()
6972 rule->state = HCLGE_FD_TO_ADD; in hclge_fd_build_arfs_rule()
6973 if (tuples->ether_proto == ETH_P_IP) { in hclge_fd_build_arfs_rule()
6974 if (tuples->ip_proto == IPPROTO_TCP) in hclge_fd_build_arfs_rule()
6975 rule->flow_type = TCP_V4_FLOW; in hclge_fd_build_arfs_rule()
6977 rule->flow_type = UDP_V4_FLOW; in hclge_fd_build_arfs_rule()
6979 if (tuples->ip_proto == IPPROTO_TCP) in hclge_fd_build_arfs_rule()
6980 rule->flow_type = TCP_V6_FLOW; in hclge_fd_build_arfs_rule()
6982 rule->flow_type = UDP_V6_FLOW; in hclge_fd_build_arfs_rule()
6984 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); in hclge_fd_build_arfs_rule()
6985 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); in hclge_fd_build_arfs_rule()
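/* Building an aRFS rule: only the 5-tuple is matched, so the MAC (and,
 * per the omitted lines, the other non-5-tuple fields) are marked
 * unused, flow_type is derived from the ethertype/ip_proto pair
 * (TCP/UDP over IPv4/IPv6), the tuple masks are set to all-ones for an
 * exact match, and the rule starts as HCLGE_FD_TO_ADD with rule_type
 * HCLGE_FD_ARFS_ACTIVE.
 */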
6993 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry_by_arfs()
6997 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_add_fd_entry_by_arfs()
6998 return -EOPNOTSUPP; in hclge_add_fd_entry_by_arfs()
7003 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7004 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && in hclge_add_fd_entry_by_arfs()
7005 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { in hclge_add_fd_entry_by_arfs()
7006 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7007 return -EOPNOTSUPP; in hclge_add_fd_entry_by_arfs()
7019 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); in hclge_add_fd_entry_by_arfs()
7020 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_add_fd_entry_by_arfs()
7021 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7022 return -ENOSPC; in hclge_add_fd_entry_by_arfs()
7027 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7028 return -ENOMEM; in hclge_add_fd_entry_by_arfs()
7031 rule->location = bit_id; in hclge_add_fd_entry_by_arfs()
7032 rule->arfs.flow_id = flow_id; in hclge_add_fd_entry_by_arfs()
7033 rule->queue_id = queue_id; in hclge_add_fd_entry_by_arfs()
7035 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_by_arfs()
7036 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; in hclge_add_fd_entry_by_arfs()
7037 } else if (rule->queue_id != queue_id) { in hclge_add_fd_entry_by_arfs()
7038 rule->queue_id = queue_id; in hclge_add_fd_entry_by_arfs()
7039 rule->state = HCLGE_FD_TO_ADD; in hclge_add_fd_entry_by_arfs()
7040 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_add_fd_entry_by_arfs()
7043 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7044 return rule->location; in hclge_add_fd_entry_by_arfs()
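/* aRFS insertion is opportunistic: it is refused while ethtool or
 * tc-flower rules own the table, a free location is taken from fd_bmap
 * (-ENOSPC when the stage-1 table is full), and an existing rule for the
 * same flow is merely re-targeted to the new queue (state back to
 * HCLGE_FD_TO_ADD) instead of duplicated. The chosen location is
 * returned and serves as the flow's filter id.
 */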
7050 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_rfs_filter_expire()
7054 spin_lock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7055 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { in hclge_rfs_filter_expire()
7056 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7059 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_rfs_filter_expire()
7060 if (rule->state != HCLGE_FD_ACTIVE) in hclge_rfs_filter_expire()
7062 if (rps_may_expire_flow(handle->netdev, rule->queue_id, in hclge_rfs_filter_expire()
7063 rule->arfs.flow_id, rule->location)) { in hclge_rfs_filter_expire()
7064 rule->state = HCLGE_FD_TO_DEL; in hclge_rfs_filter_expire()
7065 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_rfs_filter_expire()
7068 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
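/* Expiry scan: for each ACTIVE aRFS rule, rps_may_expire_flow() decides
 * whether the flow is still referenced; stale rules are only marked
 * HCLGE_FD_TO_DEL here and HCLGE_STATE_FD_TBL_CHANGED is set, leaving
 * the actual TCAM removal to the sync path below.
 */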
7080 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) in hclge_clear_arfs_rules()
7083 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_clear_arfs_rules()
7084 switch (rule->state) { in hclge_clear_arfs_rules()
7088 rule->location, NULL, false); in hclge_clear_arfs_rules()
7093 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_clear_arfs_rules()
7094 hlist_del(&rule->rule_node); in hclge_clear_arfs_rules()
7115 ethtype_key = ntohs(match.key->n_proto); in hclge_get_cls_key_basic()
7116 ethtype_mask = ntohs(match.mask->n_proto); in hclge_get_cls_key_basic()
7122 rule->tuples.ether_proto = ethtype_key; in hclge_get_cls_key_basic()
7123 rule->tuples_mask.ether_proto = ethtype_mask; in hclge_get_cls_key_basic()
7124 rule->tuples.ip_proto = match.key->ip_proto; in hclge_get_cls_key_basic()
7125 rule->tuples_mask.ip_proto = match.mask->ip_proto; in hclge_get_cls_key_basic()
7127 rule->unused_tuple |= BIT(INNER_IP_PROTO); in hclge_get_cls_key_basic()
7128 rule->unused_tuple |= BIT(INNER_ETH_TYPE); in hclge_get_cls_key_basic()
7139 ether_addr_copy(rule->tuples.dst_mac, match.key->dst); in hclge_get_cls_key_mac()
7140 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); in hclge_get_cls_key_mac()
7141 ether_addr_copy(rule->tuples.src_mac, match.key->src); in hclge_get_cls_key_mac()
7142 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); in hclge_get_cls_key_mac()
7144 rule->unused_tuple |= BIT(INNER_DST_MAC); in hclge_get_cls_key_mac()
7145 rule->unused_tuple |= BIT(INNER_SRC_MAC); in hclge_get_cls_key_mac()
7156 rule->tuples.vlan_tag1 = match.key->vlan_id | in hclge_get_cls_key_vlan()
7157 (match.key->vlan_priority << VLAN_PRIO_SHIFT); in hclge_get_cls_key_vlan()
7158 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | in hclge_get_cls_key_vlan()
7159 (match.mask->vlan_priority << VLAN_PRIO_SHIFT); in hclge_get_cls_key_vlan()
7161 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); in hclge_get_cls_key_vlan()
7174 addr_type = match.key->addr_type; in hclge_get_cls_key_ip()
7181 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); in hclge_get_cls_key_ip()
7182 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_get_cls_key_ip()
7183 be32_to_cpu(match.mask->src); in hclge_get_cls_key_ip()
7184 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); in hclge_get_cls_key_ip()
7185 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_get_cls_key_ip()
7186 be32_to_cpu(match.mask->dst); in hclge_get_cls_key_ip()
7191 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, in hclge_get_cls_key_ip()
7193 be32_to_cpu_array(rule->tuples_mask.src_ip, in hclge_get_cls_key_ip()
7194 match.mask->src.s6_addr32, IPV6_SIZE); in hclge_get_cls_key_ip()
7195 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, in hclge_get_cls_key_ip()
7197 be32_to_cpu_array(rule->tuples_mask.dst_ip, in hclge_get_cls_key_ip()
7198 match.mask->dst.s6_addr32, IPV6_SIZE); in hclge_get_cls_key_ip()
7200 rule->unused_tuple |= BIT(INNER_SRC_IP); in hclge_get_cls_key_ip()
7201 rule->unused_tuple |= BIT(INNER_DST_IP); in hclge_get_cls_key_ip()
7213 rule->tuples.src_port = be16_to_cpu(match.key->src); in hclge_get_cls_key_port()
7214 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); in hclge_get_cls_key_port()
7215 rule->tuples.dst_port = be16_to_cpu(match.key->dst); in hclge_get_cls_key_port()
7216 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); in hclge_get_cls_key_port()
7218 rule->unused_tuple |= BIT(INNER_SRC_PORT); in hclge_get_cls_key_port()
7219 rule->unused_tuple |= BIT(INNER_DST_PORT); in hclge_get_cls_key_port()
7228 struct flow_dissector *dissector = flow->match.dissector; in hclge_parse_cls_flower()
7230 if (dissector->used_keys & in hclge_parse_cls_flower()
7238 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", in hclge_parse_cls_flower()
7239 dissector->used_keys); in hclge_parse_cls_flower()
7240 return -EOPNOTSUPP; in hclge_parse_cls_flower()
7255 u32 prio = cls_flower->common.prio; in hclge_check_cls_flower()
7257 if (tc < 0 || tc > hdev->tc_max) { in hclge_check_cls_flower()
7258 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); in hclge_check_cls_flower()
7259 return -EINVAL; in hclge_check_cls_flower()
7263 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_check_cls_flower()
7264 dev_err(&hdev->pdev->dev, in hclge_check_cls_flower()
7266 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_check_cls_flower()
7267 return -EINVAL; in hclge_check_cls_flower()
7270 if (test_bit(prio - 1, hdev->fd_bmap)) { in hclge_check_cls_flower()
7271 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); in hclge_check_cls_flower()
7272 return -EINVAL; in hclge_check_cls_flower()
7282 struct hclge_dev *hdev = vport->back; in hclge_add_cls_flower()
7286 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_cls_flower()
7287 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7289 return -EOPNOTSUPP; in hclge_add_cls_flower()
7294 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7301 return -ENOMEM; in hclge_add_cls_flower()
7309 rule->action = HCLGE_FD_ACTION_SELECT_TC; in hclge_add_cls_flower()
7310 rule->cls_flower.tc = tc; in hclge_add_cls_flower()
7311 rule->location = cls_flower->common.prio - 1; in hclge_add_cls_flower()
7312 rule->vf_id = 0; in hclge_add_cls_flower()
7313 rule->cls_flower.cookie = cls_flower->cookie; in hclge_add_cls_flower()
7314 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; in hclge_add_cls_flower()
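/* tc-flower offload: after the fd-support and tc/prio checks and after
 * parsing the flower match into rule tuples, the rule gets the
 * HCLGE_FD_ACTION_SELECT_TC action, location prio - 1, the flower cookie
 * for later lookup and rule_type HCLGE_FD_TC_FLOWER_ACTIVE, presumably
 * before going through the same common add path as ethtool rules.
 */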
7329 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_find_cls_flower()
7330 if (rule->cls_flower.cookie == cookie) in hclge_find_cls_flower()
7341 struct hclge_dev *hdev = vport->back; in hclge_del_cls_flower()
7345 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_cls_flower()
7346 return -EOPNOTSUPP; in hclge_del_cls_flower()
7348 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7350 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); in hclge_del_cls_flower()
7352 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7353 return -EINVAL; in hclge_del_cls_flower()
7356 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, in hclge_del_cls_flower()
7363 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); in hclge_del_cls_flower()
7364 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_del_cls_flower()
7365 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7369 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); in hclge_del_cls_flower()
7370 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7381 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) in hclge_sync_fd_list()
7384 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7387 switch (rule->state) { in hclge_sync_fd_list()
7392 rule->state = HCLGE_FD_ACTIVE; in hclge_sync_fd_list()
7396 rule->location, NULL, false); in hclge_sync_fd_list()
7399 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_sync_fd_list()
7409 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_sync_fd_list()
7411 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7416 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_sync_fd_table()
7419 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { in hclge_sync_fd_table()
7420 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; in hclge_sync_fd_table()
7427 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); in hclge_sync_fd_table()
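/* Periodic sync: when HCLGE_STATE_FD_CLEAR_ALL is pending the table is
 * wiped (clearing the software list only for aRFS rules); otherwise
 * hclge_sync_fd_list() walks the rule list, activates TO_ADD entries,
 * removes TO_DEL entries from the TCAM and rule count, and re-arms
 * HCLGE_STATE_FD_TBL_CHANGED on failure so the work is retried later.
 */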
7433 struct hclge_dev *hdev = vport->back; in hclge_get_hw_reset_stat()
7435 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || in hclge_get_hw_reset_stat()
7436 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); in hclge_get_hw_reset_stat()
7442 struct hclge_dev *hdev = vport->back; in hclge_get_cmdq_stat()
7444 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_get_cmdq_stat()
7450 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_resetting()
7452 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_ae_dev_resetting()
7458 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_reset_cnt()
7460 return hdev->rst_stats.hw_reset_done_cnt; in hclge_ae_dev_reset_cnt()
7466 struct hclge_dev *hdev = vport->back; in hclge_enable_fd()
7468 hdev->fd_en = enable; in hclge_enable_fd()
7471 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); in hclge_enable_fd()
7503 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); in hclge_cfg_mac_mode()
7505 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_mode()
7507 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_mode()
7531 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; in hclge_config_switch_param()
7532 req->func_id = cpu_to_le32(func_id); in hclge_config_switch_param()
7534 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7536 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7543 req->switch_param = (req->switch_param & param_mask) | switch_param; in hclge_config_switch_param()
7544 req->param_mask = param_mask; in hclge_config_switch_param()
7546 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7548 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7558 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_phy_link_status_wait()
7565 dev_err(&hdev->pdev->dev, in hclge_phy_link_status_wait()
7570 if (phydev->link == link_ret) in hclge_phy_link_status_wait()
7593 return -EBUSY; in hclge_mac_link_status_wait()
7622 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7624 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7630 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); in hclge_set_app_loopback()
7633 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); in hclge_set_app_loopback()
7639 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7641 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7668 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7670 return -ENOTSUPP; in hclge_cfg_common_loopback_cmd_send()
7673 req->mask = loop_mode_b; in hclge_cfg_common_loopback_cmd_send()
7675 req->enable = loop_mode_b; in hclge_cfg_common_loopback_cmd_send()
7677 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_cmd_send()
7679 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7702 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_wait()
7704 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_wait()
7710 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); in hclge_cfg_common_loopback_wait()
7712 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { in hclge_cfg_common_loopback_wait()
7713 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); in hclge_cfg_common_loopback_wait()
7714 return -EBUSY; in hclge_cfg_common_loopback_wait()
7715 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { in hclge_cfg_common_loopback_wait()
7716 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); in hclge_cfg_common_loopback_wait()
7717 return -EIO; in hclge_cfg_common_loopback_wait()
7748 dev_err(&hdev->pdev->dev, in hclge_set_common_loopback()
7759 if (!phydev->suspended) { in hclge_enable_phy_loopback()
7786 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_phy_loopback()
7793 return -ENOTSUPP; in hclge_set_phy_loopback()
7801 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7810 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7824 req->tqp_id = cpu_to_le16(tqp_id); in hclge_tqp_enable_cmd_send()
7825 req->stream_id = cpu_to_le16(stream_id); in hclge_tqp_enable_cmd_send()
7827 req->enable |= 1U << HCLGE_TQP_ENABLE_B; in hclge_tqp_enable_cmd_send()
7829 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_tqp_enable_cmd_send()
7835 struct hclge_dev *hdev = vport->back; in hclge_tqp_enable()
7839 for (i = 0; i < handle->kinfo.num_tqps; i++) { in hclge_tqp_enable()
7851 struct hclge_dev *hdev = vport->back; in hclge_set_loopback()
7859 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_set_loopback()
7882 ret = -ENOTSUPP; in hclge_set_loopback()
7883 dev_err(&hdev->pdev->dev, in hclge_set_loopback()
7893 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", in hclge_set_loopback()
7919 unsigned long last = hdev->serv_processed_cnt; in hclge_flush_link_update()
7922 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && in hclge_flush_link_update()
7924 last == hdev->serv_processed_cnt) in hclge_flush_link_update()
7931 struct hclge_dev *hdev = vport->back; in hclge_set_timer_task()
7937 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_set_timer_task()
7948 struct hclge_dev *hdev = vport->back; in hclge_ae_start()
7952 clear_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_start()
7953 hdev->hw.mac.link = 0; in hclge_ae_start()
7966 struct hclge_dev *hdev = vport->back; in hclge_ae_stop()
7968 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_stop()
7969 spin_lock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
7971 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
7976 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { in hclge_ae_stop()
7979 if (hdev->reset_type != HNAE3_FUNC_RESET && in hclge_ae_stop()
7980 hdev->reset_type != HNAE3_FLR_RESET) { in hclge_ae_stop()
8003 struct hclge_dev *hdev = vport->back; in hclge_vport_start()
8005 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); in hclge_vport_start()
8006 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_vport_start()
8007 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_vport_start()
8008 vport->last_active_jiffies = jiffies; in hclge_vport_start()
8009 vport->need_notify = 0; in hclge_vport_start()
8011 if (test_bit(vport->vport_id, hdev->vport_config_block)) { in hclge_vport_start()
8012 if (vport->vport_id) { in hclge_vport_start()
8020 clear_bit(vport->vport_id, hdev->vport_config_block); in hclge_vport_start()
8027 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); in hclge_vport_stop()
8028 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_vport_stop()
8029 vport->need_notify = 0; in hclge_vport_stop()
8050 struct hclge_dev *hdev = vport->back; in hclge_get_mac_vlan_cmd_status()
8053 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8056 return -EIO; in hclge_get_mac_vlan_cmd_status()
8064 return -ENOSPC; in hclge_get_mac_vlan_cmd_status()
8066 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8069 return -EIO; in hclge_get_mac_vlan_cmd_status()
8074 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8076 return -ENOENT; in hclge_get_mac_vlan_cmd_status()
8079 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8082 return -EIO; in hclge_get_mac_vlan_cmd_status()
8087 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8089 return -ENOENT; in hclge_get_mac_vlan_cmd_status()
8092 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8095 return -EIO; in hclge_get_mac_vlan_cmd_status()
8098 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8101 return -EINVAL; in hclge_get_mac_vlan_cmd_status()
8112 return -EIO; in hclge_update_desc_vfid()
8122 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; in hclge_update_desc_vfid()
8155 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); in hclge_prepare_mac_addr()
8157 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); in hclge_prepare_mac_addr()
8158 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); in hclge_prepare_mac_addr()
8161 new_req->mac_addr_hi32 = cpu_to_le32(high_val); in hclge_prepare_mac_addr()
8162 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); in hclge_prepare_mac_addr()
8168 struct hclge_dev *hdev = vport->back; in hclge_remove_mac_vlan_tbl()
8178 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_remove_mac_vlan_tbl()
8180 dev_err(&hdev->pdev->dev, in hclge_remove_mac_vlan_tbl()
8197 struct hclge_dev *hdev = vport->back; in hclge_lookup_mac_vlan_tbl()
8215 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_lookup_mac_vlan_tbl()
8220 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_lookup_mac_vlan_tbl()
8223 dev_err(&hdev->pdev->dev, in hclge_lookup_mac_vlan_tbl()
8239 struct hclge_dev *hdev = vport->back; in hclge_add_mac_vlan_tbl()
8253 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mac_vlan_tbl()
8269 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); in hclge_add_mac_vlan_tbl()
8279 dev_err(&hdev->pdev->dev, in hclge_add_mac_vlan_tbl()
8298 req->space_size = cpu_to_le32(space_size); in hclge_set_umv_space()
8300 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_umv_space()
8302 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", in hclge_set_umv_space()
8317 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); in hclge_init_umv_space()
8321 if (allocated_size < hdev->wanted_umv_size) in hclge_init_umv_space()
8322 dev_warn(&hdev->pdev->dev, in hclge_init_umv_space()
8324 hdev->wanted_umv_size, allocated_size); in hclge_init_umv_space()
8326 hdev->max_umv_size = allocated_size; in hclge_init_umv_space()
8327 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8328 hdev->share_umv_size = hdev->priv_umv_size + in hclge_init_umv_space()
8329 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8331 if (hdev->ae_dev->dev_specs.mc_mac_size) in hclge_init_umv_space()
8332 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); in hclge_init_umv_space()
8342 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_umv_space()
8343 vport = &hdev->vport[i]; in hclge_reset_umv_space()
8344 vport->used_umv_num = 0; in hclge_reset_umv_space()
8347 mutex_lock(&hdev->vport_lock); in hclge_reset_umv_space()
8348 hdev->share_umv_size = hdev->priv_umv_size + in hclge_reset_umv_space()
8349 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_reset_umv_space()
8350 mutex_unlock(&hdev->vport_lock); in hclge_reset_umv_space()
8352 hdev->used_mc_mac_num = 0; in hclge_reset_umv_space()
8357 struct hclge_dev *hdev = vport->back; in hclge_is_umv_space_full()
8361 mutex_lock(&hdev->vport_lock); in hclge_is_umv_space_full()
8363 is_full = (vport->used_umv_num >= hdev->priv_umv_size && in hclge_is_umv_space_full()
8364 hdev->share_umv_size == 0); in hclge_is_umv_space_full()
8367 mutex_unlock(&hdev->vport_lock); in hclge_is_umv_space_full()
8374 struct hclge_dev *hdev = vport->back; in hclge_update_umv_space()
8377 if (vport->used_umv_num > hdev->priv_umv_size) in hclge_update_umv_space()
8378 hdev->share_umv_size++; in hclge_update_umv_space()
8380 if (vport->used_umv_num > 0) in hclge_update_umv_space()
8381 vport->used_umv_num--; in hclge_update_umv_space()
8383 if (vport->used_umv_num >= hdev->priv_umv_size && in hclge_update_umv_space()
8384 hdev->share_umv_size > 0) in hclge_update_umv_space()
8385 hdev->share_umv_size--; in hclge_update_umv_space()
8386 vport->used_umv_num++; in hclge_update_umv_space()
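/* Unicast MAC (UMV) space accounting: a vport first consumes its private
 * priv_umv_size quota and only then dips into the shared pool, so
 * share_umv_size is adjusted only when used_umv_num crosses
 * priv_umv_size; hclge_is_umv_space_full() above takes vport_lock around
 * the same counters. Minimal model of the "full" condition (illustrative
 * only, not driver code):
 */
static inline bool example_umv_full(u32 used, u32 priv_quota, u32 shared)
{
	return used >= priv_quota && shared == 0;
}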
8396 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) in hclge_find_mac_node()
8408 if (mac_node->state == HCLGE_MAC_TO_DEL) in hclge_update_mac_node()
8409 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node()
8413 if (mac_node->state == HCLGE_MAC_TO_ADD) { in hclge_update_mac_node()
8414 list_del(&mac_node->node); in hclge_update_mac_node()
8417 mac_node->state = HCLGE_MAC_TO_DEL; in hclge_update_mac_node()
8420 /* only from tmp_add_list, the mac_node->state won't be in hclge_update_mac_node()
8424 if (mac_node->state == HCLGE_MAC_TO_ADD) in hclge_update_mac_node()
8425 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node()
8437 struct hclge_dev *hdev = vport->back; in hclge_update_mac_list()
8442 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_update_mac_list()
8444 spin_lock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8453 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8454 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_list()
8460 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8462 dev_err(&hdev->pdev->dev, in hclge_update_mac_list()
8465 return -ENOENT; in hclge_update_mac_list()
8470 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
8471 return -ENOMEM; in hclge_update_mac_list()
8474 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_list()
8476 mac_node->state = state; in hclge_update_mac_list()
8477 ether_addr_copy(mac_node->mac_addr, addr); in hclge_update_mac_list()
8478 list_add_tail(&mac_node->node, list); in hclge_update_mac_list()
8480 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
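/* MAC list bookkeeping: under mac_list_lock the address is looked up
 * first; an existing node only has its state transitioned by
 * hclge_update_mac_node(), a delete request for an unknown address
 * returns -ENOENT, and a new add request allocates a node in the
 * requested state. Any change sets HCLGE_VPORT_STATE_MAC_TBL_CHANGE so
 * the list is later synced to the hardware MAC/VLAN table.
 */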
8498 struct hclge_dev *hdev = vport->back; in hclge_add_uc_addr_common()
8509 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
8514 return -EINVAL; in hclge_add_uc_addr_common()
8520 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); in hclge_add_uc_addr_common()
8531 if (ret == -ENOENT) { in hclge_add_uc_addr_common()
8532 mutex_lock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8537 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8540 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8542 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) in hclge_add_uc_addr_common()
8543 dev_err(&hdev->pdev->dev, "UC MAC table is full (%u)\n", in hclge_add_uc_addr_common()
8544 hdev->priv_umv_size); in hclge_add_uc_addr_common()
8546 return -ENOSPC; in hclge_add_uc_addr_common()
8551 return -EEXIST; in hclge_add_uc_addr_common()
8569 struct hclge_dev *hdev = vport->back; in hclge_rm_uc_addr_common()
8578 dev_dbg(&hdev->pdev->dev, "Failed to remove mac: invalid mac %s.\n", in hclge_rm_uc_addr_common()
8580 return -EINVAL; in hclge_rm_uc_addr_common()
8587 if (!ret || ret == -ENOENT) { in hclge_rm_uc_addr_common()
8588 mutex_lock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8590 mutex_unlock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8610 struct hclge_dev *hdev = vport->back; in hclge_add_mc_addr_common()
8619 dev_err(&hdev->pdev->dev, in hclge_add_mc_addr_common()
8622 return -EINVAL; in hclge_add_mc_addr_common()
8628 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && in hclge_add_mc_addr_common()
8629 hdev->used_mc_mac_num >= in hclge_add_mc_addr_common()
8630 hdev->ae_dev->dev_specs.mc_mac_size) in hclge_add_mc_addr_common()
8640 status = hclge_update_desc_vfid(desc, vport->vport_id, false); in hclge_add_mc_addr_common()
8644 if (status == -ENOSPC) in hclge_add_mc_addr_common()
8647 hdev->used_mc_mac_num++; in hclge_add_mc_addr_common()
8653 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { in hclge_add_mc_addr_common()
8654 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; in hclge_add_mc_addr_common()
8655 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); in hclge_add_mc_addr_common()
8658 return -ENOSPC; in hclge_add_mc_addr_common()
8674 struct hclge_dev *hdev = vport->back; in hclge_rm_mc_addr_common()
8682 dev_dbg(&hdev->pdev->dev, in hclge_rm_mc_addr_common()
8685 return -EINVAL; in hclge_rm_mc_addr_common()
8693 status = hclge_update_desc_vfid(desc, vport->vport_id, true); in hclge_rm_mc_addr_common()
8701 hdev->used_mc_mac_num--; in hclge_rm_mc_addr_common()
8706 } else if (status == -ENOENT) { in hclge_rm_mc_addr_common()
8727 ret = sync(vport, mac_node->mac_addr); in hclge_sync_vport_mac_list()
8729 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_sync_vport_mac_list()
8732 &vport->state); in hclge_sync_vport_mac_list()
8742 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || in hclge_sync_vport_mac_list()
8743 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) in hclge_sync_vport_mac_list()
8763 ret = unsync(vport, mac_node->mac_addr); in hclge_unsync_vport_mac_list()
8764 if (!ret || ret == -ENOENT) { in hclge_unsync_vport_mac_list()
8765 list_del(&mac_node->node); in hclge_unsync_vport_mac_list()
8769 &vport->state); in hclge_unsync_vport_mac_list()
8782 if (mac_node->state == HCLGE_MAC_TO_ADD) in hclge_sync_from_add_list()
8793 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); in hclge_sync_from_add_list()
8795 hclge_update_mac_node(new_node, mac_node->state); in hclge_sync_from_add_list()
8796 list_del(&mac_node->node); in hclge_sync_from_add_list()
8798 } else if (mac_node->state == HCLGE_MAC_ACTIVE) { in hclge_sync_from_add_list()
8799 mac_node->state = HCLGE_MAC_TO_DEL; in hclge_sync_from_add_list()
8800 list_move_tail(&mac_node->node, mac_list); in hclge_sync_from_add_list()
8802 list_del(&mac_node->node); in hclge_sync_from_add_list()
8816 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); in hclge_sync_from_del_list()
8825 new_node->state = HCLGE_MAC_ACTIVE; in hclge_sync_from_del_list()
8826 list_del(&mac_node->node); in hclge_sync_from_del_list()
8829 list_move_tail(&mac_node->node, mac_list); in hclge_sync_from_del_list()
8840 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; in hclge_update_overflow_flags()
8842 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; in hclge_update_overflow_flags()
8845 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; in hclge_update_overflow_flags()
8847 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; in hclge_update_overflow_flags()
8866 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_sync_vport_mac_table()
8868 spin_lock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
8871 switch (mac_node->state) { in hclge_sync_vport_mac_table()
8873 list_move_tail(&mac_node->node, &tmp_del_list); in hclge_sync_vport_mac_table()
8879 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); in hclge_sync_vport_mac_table()
8880 new_node->state = mac_node->state; in hclge_sync_vport_mac_table()
8881 list_add_tail(&new_node->node, &tmp_add_list); in hclge_sync_vport_mac_table()
8889 spin_unlock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
8898 spin_lock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
8903 spin_unlock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
8910 struct hclge_dev *hdev = vport->back; in hclge_need_sync_mac_table()
8912 if (test_bit(vport->vport_id, hdev->vport_config_block)) in hclge_need_sync_mac_table()
8915 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) in hclge_need_sync_mac_table()
8925 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_mac_table()
8926 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_mac_table()
8943 switch (mac_cfg->state) { in hclge_build_del_list()
8946 list_move_tail(&mac_cfg->node, tmp_del_list); in hclge_build_del_list()
8950 list_del(&mac_cfg->node); in hclge_build_del_list()
8968 ret = unsync(vport, mac_cfg->mac_addr); in hclge_unsync_del_list()
8969 if (!ret || ret == -ENOENT) { in hclge_unsync_del_list()
8975 mac_cfg->state == HCLGE_MAC_ACTIVE) { in hclge_unsync_del_list()
8976 mac_cfg->state = HCLGE_MAC_TO_ADD; in hclge_unsync_del_list()
8978 list_del(&mac_cfg->node); in hclge_unsync_del_list()
8982 mac_cfg->state = HCLGE_MAC_TO_DEL; in hclge_unsync_del_list()
8991 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_mac_table()
8995 list = &vport->uc_mac_list; in hclge_rm_vport_all_mac_table()
8998 list = &vport->mc_mac_list; in hclge_rm_vport_all_mac_table()
9005 set_bit(vport->vport_id, hdev->vport_config_block); in hclge_rm_vport_all_mac_table()
9007 spin_lock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9011 spin_unlock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9015 spin_lock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9019 spin_unlock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
9027 struct hclge_dev *hdev = vport->back; in hclge_uninit_vport_mac_list()
9033 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_uninit_vport_mac_list()
9035 spin_lock_bh(&vport->mac_list_lock); in hclge_uninit_vport_mac_list()
9038 switch (mac_node->state) { in hclge_uninit_vport_mac_list()
9041 list_move_tail(&mac_node->node, &tmp_del_list); in hclge_uninit_vport_mac_list()
9044 list_del(&mac_node->node); in hclge_uninit_vport_mac_list()
9050 spin_unlock_bh(&vport->mac_list_lock); in hclge_uninit_vport_mac_list()
9055 dev_warn(&hdev->pdev->dev, in hclge_uninit_vport_mac_list()
9058 vport->vport_id); in hclge_uninit_vport_mac_list()
9061 list_del(&mac_node->node); in hclge_uninit_vport_mac_list()
9071 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_mac_table()
9072 vport = &hdev->vport[i]; in hclge_uninit_mac_table()
9089 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9092 return -EIO; in hclge_get_mac_ethertype_cmd_status()
9101 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9103 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
9106 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9108 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
9111 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9114 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
9125 struct hclge_dev *hdev = vport->back; in hclge_set_vf_mac()
9129 return -EINVAL; in hclge_set_vf_mac()
9132 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { in hclge_set_vf_mac()
9133 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9139 ether_addr_copy(vport->vf_info.mac, mac_addr); in hclge_set_vf_mac()
9145 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { in hclge_set_vf_mac()
9146 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9153 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9170 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mgr_tbl()
9172 dev_err(&hdev->pdev->dev, in hclge_add_mgr_tbl()
9192 dev_err(&hdev->pdev->dev, in init_mgr_tbl()
9205 struct hclge_dev *hdev = vport->back; in hclge_get_mac_addr()
9207 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclge_get_mac_addr()
9213 struct list_head *list = &vport->uc_mac_list; in hclge_update_mac_node_for_dev_addr()
9220 return -ENOMEM; in hclge_update_mac_node_for_dev_addr()
9222 new_node->state = HCLGE_MAC_TO_ADD; in hclge_update_mac_node_for_dev_addr()
9223 ether_addr_copy(new_node->mac_addr, new_addr); in hclge_update_mac_node_for_dev_addr()
9224 list_add(&new_node->node, list); in hclge_update_mac_node_for_dev_addr()
9226 if (new_node->state == HCLGE_MAC_TO_DEL) in hclge_update_mac_node_for_dev_addr()
9227 new_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node_for_dev_addr()
9230 * addr may not be re-added into mac table for the umv space in hclge_update_mac_node_for_dev_addr()
9234 list_move(&new_node->node, list); in hclge_update_mac_node_for_dev_addr()
9240 if (old_node->state == HCLGE_MAC_TO_ADD) { in hclge_update_mac_node_for_dev_addr()
9241 list_del(&old_node->node); in hclge_update_mac_node_for_dev_addr()
9244 old_node->state = HCLGE_MAC_TO_DEL; in hclge_update_mac_node_for_dev_addr()
9249 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_node_for_dev_addr()
9260 struct hclge_dev *hdev = vport->back; in hclge_set_mac_addr()
9269 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9272 return -EINVAL; in hclge_set_mac_addr()
9277 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9284 old_addr = hdev->hw.mac.mac_addr; in hclge_set_mac_addr()
9286 spin_lock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
9290 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9293 spin_unlock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
9303 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); in hclge_set_mac_addr()
9304 spin_unlock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
9316 return -EOPNOTSUPP; in hclge_mii_ioctl()
9320 data->phy_id = hdev->hw.mac.phy_addr; in hclge_mii_ioctl()
9324 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); in hclge_mii_ioctl()
9328 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); in hclge_mii_ioctl()
9330 return -EOPNOTSUPP; in hclge_mii_ioctl()
9338 struct hclge_dev *hdev = vport->back; in hclge_do_ioctl()
9346 if (!hdev->hw.mac.phydev) in hclge_do_ioctl()
9350 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); in hclge_do_ioctl()
9362 req->vf_id = vf_id; in hclge_set_port_vlan_filter_bypass()
9363 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, in hclge_set_port_vlan_filter_bypass()
9366 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter_bypass()
9368 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter_bypass()
9385 req->vlan_type = vlan_type; in hclge_set_vlan_filter_ctrl()
9386 req->vf_id = vf_id; in hclge_set_vlan_filter_ctrl()
9388 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9390 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9397 req->vlan_fe = filter_en ? in hclge_set_vlan_filter_ctrl()
9398 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); in hclge_set_vlan_filter_ctrl()
9400 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9402 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9410 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_filter()
9411 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_set_vport_vlan_filter()
9414 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vport_vlan_filter()
9417 enable, vport->vport_id); in hclge_set_vport_vlan_filter()
9421 vport->vport_id); in hclge_set_vport_vlan_filter()
9425 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { in hclge_set_vport_vlan_filter()
9426 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, in hclge_set_vport_vlan_filter()
9428 } else if (!vport->vport_id) { in hclge_set_vport_vlan_filter()
9429 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) in hclge_set_vport_vlan_filter()
9442 struct hnae3_handle *handle = &vport->nic; in hclge_need_enable_vport_vlan_filter()
9444 struct hclge_dev *hdev = vport->back; in hclge_need_enable_vport_vlan_filter()
9446 if (vport->vport_id) { in hclge_need_enable_vport_vlan_filter()
9447 if (vport->port_base_vlan_cfg.state != in hclge_need_enable_vport_vlan_filter()
9451 if (vport->vf_info.trusted && vport->vf_info.request_uc_en) in hclge_need_enable_vport_vlan_filter()
9453 } else if (handle->netdev_flags & HNAE3_USER_UPE) { in hclge_need_enable_vport_vlan_filter()
9457 if (!vport->req_vlan_fltr_en) in hclge_need_enable_vport_vlan_filter()
9461 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_need_enable_vport_vlan_filter()
9464 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) in hclge_need_enable_vport_vlan_filter()
9465 if (vlan->vlan_id != 0) in hclge_need_enable_vport_vlan_filter()
9473 struct hclge_dev *hdev = vport->back; in hclge_enable_vport_vlan_filter()
9477 mutex_lock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9479 vport->req_vlan_fltr_en = request_en; in hclge_enable_vport_vlan_filter()
9482 if (need_en == vport->cur_vlan_fltr_en) { in hclge_enable_vport_vlan_filter()
9483 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9489 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9493 vport->cur_vlan_fltr_en = need_en; in hclge_enable_vport_vlan_filter()
9495 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9530 req0->vlan_id = cpu_to_le16(vlan); in hclge_set_vf_vlan_filter_cmd()
9531 req0->vlan_cfg = is_kill; in hclge_set_vf_vlan_filter_cmd()
9534 req0->vf_bitmap[vf_byte_off] = vf_byte_val; in hclge_set_vf_vlan_filter_cmd()
9536 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; in hclge_set_vf_vlan_filter_cmd()
9538 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_set_vf_vlan_filter_cmd()
9540 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter_cmd()
9558 if (!req->resp_code || req->resp_code == 1) in hclge_check_vf_vlan_cmd_status()
9561 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { in hclge_check_vf_vlan_cmd_status()
9562 set_bit(vfid, hdev->vf_vlan_full); in hclge_check_vf_vlan_cmd_status()
9563 dev_warn(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9568 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9570 req->resp_code); in hclge_check_vf_vlan_cmd_status()
9573 if (!req->resp_code) in hclge_check_vf_vlan_cmd_status()
9581 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) in hclge_check_vf_vlan_cmd_status()
9584 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9586 req->resp_code); in hclge_check_vf_vlan_cmd_status()
9589 return -EIO; in hclge_check_vf_vlan_cmd_status()
9595 struct hclge_vport *vport = &hdev->vport[vfid]; in hclge_set_vf_vlan_common()
9604 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { in hclge_set_vf_vlan_common()
9605 if (vport->vf_info.spoofchk && vlan) { in hclge_set_vf_vlan_common()
9606 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
9608 return -EPERM; in hclge_set_vf_vlan_common()
9638 req->vlan_offset = vlan_offset_160; in hclge_set_port_vlan_filter()
9639 req->vlan_cfg = is_kill; in hclge_set_port_vlan_filter()
9640 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; in hclge_set_port_vlan_filter()
9642 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter()
9644 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter()
9654 test_bit(vport_id, hdev->vlan_table[vlan_id])) in hclge_need_update_port_vlan()
9657 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9658 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9665 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9666 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9686 return -EINVAL; in hclge_set_vlan_filter_hw()
9690 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
9699 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) in hclge_set_vlan_filter_hw()
9711 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; in hclge_set_vlan_tx_offload_cfg()
9713 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_tx_offload_cfg()
9721 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); in hclge_set_vlan_tx_offload_cfg()
9722 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); in hclge_set_vlan_tx_offload_cfg()
9723 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, in hclge_set_vlan_tx_offload_cfg()
9724 vcfg->accept_tag1 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9725 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, in hclge_set_vlan_tx_offload_cfg()
9726 vcfg->accept_untag1 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9727 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, in hclge_set_vlan_tx_offload_cfg()
9728 vcfg->accept_tag2 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9729 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, in hclge_set_vlan_tx_offload_cfg()
9730 vcfg->accept_untag2 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9731 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, in hclge_set_vlan_tx_offload_cfg()
9732 vcfg->insert_tag1_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9733 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, in hclge_set_vlan_tx_offload_cfg()
9734 vcfg->insert_tag2_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9735 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, in hclge_set_vlan_tx_offload_cfg()
9736 vcfg->tag_shift_mode_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
9737 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); in hclge_set_vlan_tx_offload_cfg()
9739 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; in hclge_set_vlan_tx_offload_cfg()
9740 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / in hclge_set_vlan_tx_offload_cfg()
9742 req->vf_bitmap[bmap_index] = in hclge_set_vlan_tx_offload_cfg()
9743 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); in hclge_set_vlan_tx_offload_cfg()
9745 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_tx_offload_cfg()
9747 dev_err(&hdev->pdev->dev, in hclge_set_vlan_tx_offload_cfg()
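The last few lines of hclge_set_vlan_tx_offload_cfg() above compute which descriptor byte and bit correspond to a vport before the config is sent. A small sketch of that index arithmetic, assuming illustrative values of 256 VFs per command and 8 VFs per bitmap byte in place of the real HCLGE_VF_NUM_PER_CMD and HCLGE_VF_NUM_PER_BYTE:

#include <stdio.h>

/* Hypothetical stand-ins for HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE. */
#define VF_NUM_PER_CMD  256
#define VF_NUM_PER_BYTE 8

int main(void)
{
	unsigned int vport_id = 19;	/* example vport */
	unsigned int vf_offset = vport_id / VF_NUM_PER_CMD;
	unsigned int bmap_index = vport_id % VF_NUM_PER_CMD / VF_NUM_PER_BYTE;
	unsigned int bit = 1U << (vport_id % VF_NUM_PER_BYTE);

	/* vport 19 -> descriptor 0, bitmap byte 2, bit mask 0x08 */
	printf("offset=%u byte=%u mask=0x%02x\n", vf_offset, bmap_index, bit);
	return 0;
}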
9756 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; in hclge_set_vlan_rx_offload_cfg()
9758 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_rx_offload_cfg()
9766 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
9767 vcfg->strip_tag1_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9768 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
9769 vcfg->strip_tag2_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9770 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
9771 vcfg->vlan1_vlan_prionly ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9772 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
9773 vcfg->vlan2_vlan_prionly ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9774 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
9775 vcfg->strip_tag1_discard_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9776 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
9777 vcfg->strip_tag2_discard_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
9779 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; in hclge_set_vlan_rx_offload_cfg()
9780 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / in hclge_set_vlan_rx_offload_cfg()
9782 req->vf_bitmap[bmap_index] = in hclge_set_vlan_rx_offload_cfg()
9783 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); in hclge_set_vlan_rx_offload_cfg()
9785 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_rx_offload_cfg()
9787 dev_err(&hdev->pdev->dev, in hclge_set_vlan_rx_offload_cfg()
9801 vport->txvlan_cfg.accept_tag1 = true; in hclge_vlan_offload_cfg()
9802 vport->txvlan_cfg.insert_tag1_en = false; in hclge_vlan_offload_cfg()
9803 vport->txvlan_cfg.default_tag1 = 0; in hclge_vlan_offload_cfg()
9805 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); in hclge_vlan_offload_cfg()
9807 vport->txvlan_cfg.accept_tag1 = in hclge_vlan_offload_cfg()
9808 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; in hclge_vlan_offload_cfg()
9809 vport->txvlan_cfg.insert_tag1_en = true; in hclge_vlan_offload_cfg()
9810 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | in hclge_vlan_offload_cfg()
9814 vport->txvlan_cfg.accept_untag1 = true; in hclge_vlan_offload_cfg()
9820 vport->txvlan_cfg.accept_tag2 = true; in hclge_vlan_offload_cfg()
9821 vport->txvlan_cfg.accept_untag2 = true; in hclge_vlan_offload_cfg()
9822 vport->txvlan_cfg.insert_tag2_en = false; in hclge_vlan_offload_cfg()
9823 vport->txvlan_cfg.default_tag2 = 0; in hclge_vlan_offload_cfg()
9824 vport->txvlan_cfg.tag_shift_mode_en = true; in hclge_vlan_offload_cfg()
9827 vport->rxvlan_cfg.strip_tag1_en = false; in hclge_vlan_offload_cfg()
9828 vport->rxvlan_cfg.strip_tag2_en = in hclge_vlan_offload_cfg()
9829 vport->rxvlan_cfg.rx_vlan_offload_en; in hclge_vlan_offload_cfg()
9830 vport->rxvlan_cfg.strip_tag2_discard_en = false; in hclge_vlan_offload_cfg()
9832 vport->rxvlan_cfg.strip_tag1_en = in hclge_vlan_offload_cfg()
9833 vport->rxvlan_cfg.rx_vlan_offload_en; in hclge_vlan_offload_cfg()
9834 vport->rxvlan_cfg.strip_tag2_en = true; in hclge_vlan_offload_cfg()
9835 vport->rxvlan_cfg.strip_tag2_discard_en = true; in hclge_vlan_offload_cfg()
9838 vport->rxvlan_cfg.strip_tag1_discard_en = false; in hclge_vlan_offload_cfg()
9839 vport->rxvlan_cfg.vlan1_vlan_prionly = false; in hclge_vlan_offload_cfg()
9840 vport->rxvlan_cfg.vlan2_vlan_prionly = false; in hclge_vlan_offload_cfg()
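For a port-based VLAN, default_tag1 above packs the QoS priority and the VLAN id into one 16-bit value. A sketch of that packing, assuming the usual VLAN_PRIO_SHIFT of 13 from linux/if_vlan.h:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13	/* priority sits in the top 3 bits of the TCI */

static uint16_t make_default_tag(uint16_t vlan_id, uint8_t qos)
{
	/* qos (0-7) in bits 15:13, VLAN id (0-4095) in bits 11:0 */
	return (uint16_t)((qos << VLAN_PRIO_SHIFT) | (vlan_id & 0x0fff));
}

int main(void)
{
	/* qos 5, VLAN 100 -> 0xa064 */
	printf("tag=0x%04x\n", (unsigned int)make_default_tag(100, 5));
	return 0;
}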
9858 rx_req->ot_fst_vlan_type = in hclge_set_vlan_protocol_type()
9859 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); in hclge_set_vlan_protocol_type()
9860 rx_req->ot_sec_vlan_type = in hclge_set_vlan_protocol_type()
9861 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); in hclge_set_vlan_protocol_type()
9862 rx_req->in_fst_vlan_type = in hclge_set_vlan_protocol_type()
9863 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); in hclge_set_vlan_protocol_type()
9864 rx_req->in_sec_vlan_type = in hclge_set_vlan_protocol_type()
9865 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); in hclge_set_vlan_protocol_type()
9867 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9869 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9878 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); in hclge_set_vlan_protocol_type()
9879 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); in hclge_set_vlan_protocol_type()
9881 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9883 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9896 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_init_vlan_filter()
9902 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_filter()
9903 vport = &hdev->vport[i]; in hclge_init_vlan_filter()
9906 vport->vport_id); in hclge_init_vlan_filter()
9909 vport->cur_vlan_fltr_en = true; in hclge_init_vlan_filter()
9918 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9919 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9920 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9921 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9922 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9923 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9935 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vport_vlan_offload()
9936 vport = &hdev->vport[i]; in hclge_init_vport_vlan_offload()
9937 cfg = &vport->port_base_vlan_cfg; in hclge_init_vport_vlan_offload()
9939 ret = hclge_vlan_offload_cfg(vport, cfg->state, in hclge_init_vport_vlan_offload()
9940 cfg->vlan_info.vlan_tag, in hclge_init_vport_vlan_offload()
9941 cfg->vlan_info.qos); in hclge_init_vport_vlan_offload()
9950 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_init_vlan_config()
9972 struct hclge_dev *hdev = vport->back; in hclge_add_vport_vlan_table()
9974 mutex_lock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
9976 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_add_vport_vlan_table()
9977 if (vlan->vlan_id == vlan_id) { in hclge_add_vport_vlan_table()
9978 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
9985 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
9989 vlan->hd_tbl_status = writen_to_tbl; in hclge_add_vport_vlan_table()
9990 vlan->vlan_id = vlan_id; in hclge_add_vport_vlan_table()
9992 list_add_tail(&vlan->node, &vport->vlan_list); in hclge_add_vport_vlan_table()
9993 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
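hclge_add_vport_vlan_table() is a check-then-insert under vport_lock, so each VLAN id is recorded at most once per vport. The same pattern in a self-contained userspace sketch, with an array and a pthread mutex standing in for the driver's list and lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned short tracked_vlans[16];
static unsigned int tracked_count;

/* Add vlan_id unless it is already tracked; returns false when full. */
static bool vlan_table_add(unsigned short vlan_id)
{
	bool ok = true;
	unsigned int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < tracked_count; i++)
		if (tracked_vlans[i] == vlan_id)
			goto out;	/* already present, nothing to do */

	if (tracked_count < sizeof(tracked_vlans) / sizeof(tracked_vlans[0]))
		tracked_vlans[tracked_count++] = vlan_id;
	else
		ok = false;
out:
	pthread_mutex_unlock(&table_lock);
	return ok;
}

int main(void)
{
	vlan_table_add(10);
	vlan_table_add(10);	/* duplicate is ignored */
	printf("tracked %u vlan(s)\n", tracked_count);	/* prints 1 */
	return 0;
}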
9999 struct hclge_dev *hdev = vport->back; in hclge_add_vport_all_vlan_table()
10002 mutex_lock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10004 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_add_vport_all_vlan_table()
10005 if (!vlan->hd_tbl_status) { in hclge_add_vport_all_vlan_table()
10007 vport->vport_id, in hclge_add_vport_all_vlan_table()
10008 vlan->vlan_id, false); in hclge_add_vport_all_vlan_table()
10010 dev_err(&hdev->pdev->dev, in hclge_add_vport_all_vlan_table()
10014 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10018 vlan->hd_tbl_status = true; in hclge_add_vport_all_vlan_table()
10021 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10030 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_vlan_table()
10032 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_rm_vport_vlan_table()
10033 if (vlan->vlan_id == vlan_id) { in hclge_rm_vport_vlan_table()
10034 if (is_write_tbl && vlan->hd_tbl_status) in hclge_rm_vport_vlan_table()
10037 vport->vport_id, in hclge_rm_vport_vlan_table()
10041 list_del(&vlan->node); in hclge_rm_vport_vlan_table()
10051 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_vlan_table()
10053 mutex_lock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10055 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_rm_vport_all_vlan_table()
10056 if (vlan->hd_tbl_status) in hclge_rm_vport_all_vlan_table()
10059 vport->vport_id, in hclge_rm_vport_all_vlan_table()
10060 vlan->vlan_id, in hclge_rm_vport_all_vlan_table()
10063 vlan->hd_tbl_status = false; in hclge_rm_vport_all_vlan_table()
10065 list_del(&vlan->node); in hclge_rm_vport_all_vlan_table()
10069 clear_bit(vport->vport_id, hdev->vf_vlan_full); in hclge_rm_vport_all_vlan_table()
10070 mutex_unlock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10079 mutex_lock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10081 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_vlan_table()
10082 vport = &hdev->vport[i]; in hclge_uninit_vport_vlan_table()
10083 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_uninit_vport_vlan_table()
10084 list_del(&vlan->node); in hclge_uninit_vport_vlan_table()
10089 mutex_unlock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10103 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { in hclge_restore_vport_port_base_vlan_config()
10104 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; in hclge_restore_vport_port_base_vlan_config()
10105 vlan_info = vport->port_base_vlan_cfg.tbl_sta ? in hclge_restore_vport_port_base_vlan_config()
10106 &vport->port_base_vlan_cfg.vlan_info : in hclge_restore_vport_port_base_vlan_config()
10107 &vport->port_base_vlan_cfg.old_vlan_info; in hclge_restore_vport_port_base_vlan_config()
10109 vlan_id = vlan_info->vlan_tag; in hclge_restore_vport_port_base_vlan_config()
10110 vlan_proto = vlan_info->vlan_proto; in hclge_restore_vport_port_base_vlan_config()
10111 state = vport->port_base_vlan_cfg.state; in hclge_restore_vport_port_base_vlan_config()
10114 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); in hclge_restore_vport_port_base_vlan_config()
10116 vport->vport_id, in hclge_restore_vport_port_base_vlan_config()
10118 vport->port_base_vlan_cfg.tbl_sta = ret == 0; in hclge_restore_vport_port_base_vlan_config()
10126 struct hclge_dev *hdev = vport->back; in hclge_restore_vport_vlan_table()
10129 mutex_lock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10131 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_restore_vport_vlan_table()
10132 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_restore_vport_vlan_table()
10134 vport->vport_id, in hclge_restore_vport_vlan_table()
10135 vlan->vlan_id, false); in hclge_restore_vport_vlan_table()
10138 vlan->hd_tbl_status = true; in hclge_restore_vport_vlan_table()
10142 mutex_unlock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10156 if (mac_node->state == HCLGE_MAC_ACTIVE) { in hclge_mac_node_convert_for_reset()
10157 mac_node->state = HCLGE_MAC_TO_ADD; in hclge_mac_node_convert_for_reset()
10158 } else if (mac_node->state == HCLGE_MAC_TO_DEL) { in hclge_mac_node_convert_for_reset()
10159 list_del(&mac_node->node); in hclge_mac_node_convert_for_reset()
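hclge_mac_node_convert_for_reset() reflects that a reset empties the hardware MAC table: entries that were ACTIVE must be re-added, while entries queued for deletion can simply be dropped. A sketch of that state conversion over an array rather than the driver's linked list (state names here are illustrative):

#include <stdio.h>

enum mac_state { MAC_TO_ADD, MAC_ACTIVE, MAC_TO_DEL, MAC_UNUSED };

/* After reset: ACTIVE -> TO_ADD (reprogram later), TO_DEL -> drop. */
static void convert_for_reset(enum mac_state *nodes, int n)
{
	for (int i = 0; i < n; i++) {
		if (nodes[i] == MAC_ACTIVE)
			nodes[i] = MAC_TO_ADD;
		else if (nodes[i] == MAC_TO_DEL)
			nodes[i] = MAC_UNUSED;	/* stands in for list_del()+free */
	}
}

int main(void)
{
	enum mac_state nodes[] = { MAC_ACTIVE, MAC_TO_DEL, MAC_TO_ADD };

	convert_for_reset(nodes, 3);
	printf("%d %d %d\n", nodes[0], nodes[1], nodes[2]);	/* 0 3 0 */
	return 0;
}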
10167 spin_lock_bh(&vport->mac_list_lock); in hclge_restore_mac_table_common()
10169 hclge_mac_node_convert_for_reset(&vport->uc_mac_list); in hclge_restore_mac_table_common()
10170 hclge_mac_node_convert_for_reset(&vport->mc_mac_list); in hclge_restore_mac_table_common()
10171 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_restore_mac_table_common()
10173 spin_unlock_bh(&vport->mac_list_lock); in hclge_restore_mac_table_common()
10178 struct hclge_vport *vport = &hdev->vport[0]; in hclge_restore_hw_table()
10179 struct hnae3_handle *handle = &vport->nic; in hclge_restore_hw_table()
10184 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_restore_hw_table()
10192 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_en_hw_strip_rxvtag()
10193 vport->rxvlan_cfg.strip_tag1_en = false; in hclge_en_hw_strip_rxvtag()
10194 vport->rxvlan_cfg.strip_tag2_en = enable; in hclge_en_hw_strip_rxvtag()
10195 vport->rxvlan_cfg.strip_tag2_discard_en = false; in hclge_en_hw_strip_rxvtag()
10197 vport->rxvlan_cfg.strip_tag1_en = enable; in hclge_en_hw_strip_rxvtag()
10198 vport->rxvlan_cfg.strip_tag2_en = true; in hclge_en_hw_strip_rxvtag()
10199 vport->rxvlan_cfg.strip_tag2_discard_en = true; in hclge_en_hw_strip_rxvtag()
10202 vport->rxvlan_cfg.strip_tag1_discard_en = false; in hclge_en_hw_strip_rxvtag()
10203 vport->rxvlan_cfg.vlan1_vlan_prionly = false; in hclge_en_hw_strip_rxvtag()
10204 vport->rxvlan_cfg.vlan2_vlan_prionly = false; in hclge_en_hw_strip_rxvtag()
10205 vport->rxvlan_cfg.rx_vlan_offload_en = enable; in hclge_en_hw_strip_rxvtag()
10212 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_fltr_change()
10214 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_set_vport_vlan_fltr_change()
10215 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); in hclge_set_vport_vlan_fltr_change()
10223 struct hclge_dev *hdev = vport->back; in hclge_update_vlan_filter_entries()
10229 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); in hclge_update_vlan_filter_entries()
10233 htons(new_info->vlan_proto), in hclge_update_vlan_filter_entries()
10234 vport->vport_id, in hclge_update_vlan_filter_entries()
10235 new_info->vlan_tag, in hclge_update_vlan_filter_entries()
10239 vport->port_base_vlan_cfg.tbl_sta = false; in hclge_update_vlan_filter_entries()
10242 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); in hclge_update_vlan_filter_entries()
10246 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), in hclge_update_vlan_filter_entries()
10247 vport->vport_id, old_info->vlan_tag, in hclge_update_vlan_filter_entries()
10258 if (new_cfg->vlan_tag != old_cfg->vlan_tag) in hclge_need_update_vlan_filter()
10261 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) in hclge_need_update_vlan_filter()
10271 struct hclge_dev *hdev = vport->back; in hclge_modify_port_base_vlan_tag()
10275 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), in hclge_modify_port_base_vlan_tag()
10276 vport->vport_id, new_info->vlan_tag, in hclge_modify_port_base_vlan_tag()
10281 vport->port_base_vlan_cfg.tbl_sta = false; in hclge_modify_port_base_vlan_tag()
10283 if (old_info->vlan_tag == 0) in hclge_modify_port_base_vlan_tag()
10284 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, in hclge_modify_port_base_vlan_tag()
10288 vport->vport_id, in hclge_modify_port_base_vlan_tag()
10289 old_info->vlan_tag, true); in hclge_modify_port_base_vlan_tag()
10291 dev_err(&hdev->pdev->dev, in hclge_modify_port_base_vlan_tag()
10293 vport->vport_id, old_info->vlan_tag, ret); in hclge_modify_port_base_vlan_tag()
10301 struct hnae3_handle *nic = &vport->nic; in hclge_update_port_base_vlan_cfg()
10305 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; in hclge_update_port_base_vlan_cfg()
10307 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, in hclge_update_port_base_vlan_cfg()
10308 vlan_info->qos); in hclge_update_port_base_vlan_cfg()
10325 vport->port_base_vlan_cfg.state = state; in hclge_update_port_base_vlan_cfg()
10327 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; in hclge_update_port_base_vlan_cfg()
10329 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; in hclge_update_port_base_vlan_cfg()
10331 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; in hclge_update_port_base_vlan_cfg()
10332 vport->port_base_vlan_cfg.vlan_info = *vlan_info; in hclge_update_port_base_vlan_cfg()
10333 vport->port_base_vlan_cfg.tbl_sta = true; in hclge_update_port_base_vlan_cfg()
10353 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && in hclge_get_port_base_vlan_state()
10354 vport->port_base_vlan_cfg.vlan_info.qos == qos) in hclge_get_port_base_vlan_state()
10363 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_set_vf_vlan_filter()
10365 struct hclge_dev *hdev = vport->back; in hclge_set_vf_vlan_filter()
10370 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_vlan_filter()
10371 return -EOPNOTSUPP; in hclge_set_vf_vlan_filter()
10375 return -EINVAL; in hclge_set_vf_vlan_filter()
10378 if (vlan > VLAN_N_VID - 1 || qos > 7) in hclge_set_vf_vlan_filter()
10379 return -EINVAL; in hclge_set_vf_vlan_filter()
10381 return -EPROTONOSUPPORT; in hclge_set_vf_vlan_filter()
10384 vport->port_base_vlan_cfg.state, in hclge_set_vf_vlan_filter()
10395 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter()
10407 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_set_vf_vlan_filter()
10408 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_set_vf_vlan_filter()
10409 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], in hclge_set_vf_vlan_filter()
10410 vport->vport_id, in hclge_set_vf_vlan_filter()
10415 &vport->need_notify); in hclge_set_vf_vlan_filter()
10428 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_clear_vf_vlan()
10429 vport = &hdev->vport[vf]; in hclge_clear_vf_vlan()
10430 vlan_info = &vport->port_base_vlan_cfg.vlan_info; in hclge_clear_vf_vlan()
10433 vport->vport_id, in hclge_clear_vf_vlan()
10434 vlan_info->vlan_tag, true); in hclge_clear_vf_vlan()
10436 dev_err(&hdev->pdev->dev, in hclge_clear_vf_vlan()
10438 vf - HCLGE_VF_VPORT_START_NUM, ret); in hclge_clear_vf_vlan()
10446 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_filter()
10454 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10455 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_set_vlan_filter()
10456 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { in hclge_set_vlan_filter()
10457 set_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
10458 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10459 return -EBUSY; in hclge_set_vlan_filter()
10460 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { in hclge_set_vlan_filter()
10461 clear_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
10463 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10471 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_set_vlan_filter()
10472 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, in hclge_set_vlan_filter()
10482 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10484 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10491 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10492 set_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
10493 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10507 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_fltr_state()
10508 vport = &hdev->vport[i]; in hclge_sync_vlan_fltr_state()
10510 &vport->state)) in hclge_sync_vlan_fltr_state()
10514 vport->req_vlan_fltr_en); in hclge_sync_vlan_fltr_state()
10516 dev_err(&hdev->pdev->dev, in hclge_sync_vlan_fltr_state()
10518 vport->vport_id, ret); in hclge_sync_vlan_fltr_state()
10520 &vport->state); in hclge_sync_vlan_fltr_state()
10533 mutex_lock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10535 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_filter()
10536 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_vlan_filter()
10538 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, in hclge_sync_vlan_filter()
10542 vport->vport_id, vlan_id, in hclge_sync_vlan_filter()
10544 if (ret && ret != -EINVAL) { in hclge_sync_vlan_filter()
10545 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10549 clear_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_sync_vlan_filter()
10555 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10559 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, in hclge_sync_vlan_filter()
10563 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10576 req->max_frm_size = cpu_to_le16(new_mps); in hclge_set_mac_mtu()
10577 req->min_frm_size = HCLGE_MAC_MIN_FRAME; in hclge_set_mac_mtu()
10579 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_mac_mtu()
10591 struct hclge_dev *hdev = vport->back; in hclge_set_vport_mtu()
10597 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) in hclge_set_vport_mtu()
10598 return -EINVAL; in hclge_set_vport_mtu()
10601 mutex_lock(&hdev->vport_lock); in hclge_set_vport_mtu()
10602 /* VF's mps must fit within hdev->mps */ in hclge_set_vport_mtu()
10603 if (vport->vport_id && max_frm_size > hdev->mps) { in hclge_set_vport_mtu()
10604 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10605 return -EINVAL; in hclge_set_vport_mtu()
10606 } else if (vport->vport_id) { in hclge_set_vport_mtu()
10607 vport->mps = max_frm_size; in hclge_set_vport_mtu()
10608 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10613 for (i = 1; i < hdev->num_alloc_vport; i++) in hclge_set_vport_mtu()
10614 if (max_frm_size < hdev->vport[i].mps) { in hclge_set_vport_mtu()
10615 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10617 i, hdev->vport[i].mps); in hclge_set_vport_mtu()
10618 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10619 return -EINVAL; in hclge_set_vport_mtu()
10626 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10631 hdev->mps = max_frm_size; in hclge_set_vport_mtu()
10632 vport->mps = max_frm_size; in hclge_set_vport_mtu()
10636 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10641 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
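hclge_set_vport_mtu() validates the full frame size rather than the MTU itself; the ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN overhead also shows up in the hdev->mps default later in this file. A hedged sketch of the conversion and bounds check, with an assumed device maximum frame size:

#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
#define MIN_FRM_SIZE	64	/* illustrative lower bound */
#define DEV_MAX_FRM	9728	/* assumed dev_specs.max_frm_size */

/* Returns the max frame size for an MTU, or -1 if it is out of range. */
static int mtu_to_frame_size(int mtu)
{
	int frm = mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	if (frm < MIN_FRM_SIZE || frm > DEV_MAX_FRM)
		return -1;
	return frm;
}

int main(void)
{
	printf("%d\n", mtu_to_frame_size(1500));	/* 1526 */
	printf("%d\n", mtu_to_frame_size(65000));	/* -1, too large */
	return 0;
}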
10655 req->tqp_id = cpu_to_le16(queue_id); in hclge_reset_tqp_cmd_send()
10657 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); in hclge_reset_tqp_cmd_send()
10659 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_tqp_cmd_send()
10661 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd_send()
10679 req->tqp_id = cpu_to_le16(queue_id); in hclge_get_reset_status()
10681 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_reset_status()
10683 dev_err(&hdev->pdev->dev, in hclge_get_reset_status()
10688 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); in hclge_get_reset_status()
10698 queue = handle->kinfo.tqp[queue_id]; in hclge_covert_handle_qid_global()
10701 return tqp->index; in hclge_covert_handle_qid_global()
10707 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp_cmd()
10714 for (i = 0; i < handle->kinfo.num_tqps; i++) { in hclge_reset_tqp_cmd()
10718 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10738 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10740 return -ETIME; in hclge_reset_tqp_cmd()
10745 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
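hclge_reset_tqp_cmd() polls the queue's reset status and fails with -ETIME once its retry budget is exhausted. A generic poll-until-ready loop of the same shape (the retry count and readiness callback are illustrative, not the driver's values):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_TRIES 10

/* Poll @check until it reports ready or the retry budget runs out. */
static int wait_until_ready(bool (*check)(void))
{
	for (int i = 0; i < MAX_TRIES; i++) {
		if (check())
			return 0;
		/* the driver sleeps briefly between attempts; elided here */
	}
	return -ETIME;	/* mirrors the driver's timeout path */
}

static bool always_ready(void)
{
	return true;
}

int main(void)
{
	printf("ret=%d\n", wait_until_ready(always_ready));	/* ret=0 */
	return 0;
}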
10761 struct hclge_dev *hdev = vport->back; in hclge_reset_rcb()
10772 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); in hclge_reset_rcb()
10773 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); in hclge_reset_rcb()
10774 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); in hclge_reset_rcb()
10776 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_rcb()
10778 dev_err(&hdev->pdev->dev, in hclge_reset_rcb()
10783 return_status = req->fun_reset_rcb_return_status; in hclge_reset_rcb()
10788 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", in hclge_reset_rcb()
10790 return -EIO; in hclge_reset_rcb()
10802 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp()
10806 if (!vport->vport_id) { in hclge_reset_tqp()
10809 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
10821 struct hclge_dev *hdev = vport->back; in hclge_get_fw_version()
10823 return hdev->fw_version; in hclge_get_fw_version()
10828 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_flowctrl_adv()
10840 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) in hclge_cfg_pauseparam()
10845 dev_err(&hdev->pdev->dev, in hclge_cfg_pauseparam()
10853 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_cfg_flowctrl()
10859 if (!phydev->link) in hclge_cfg_flowctrl()
10862 if (!phydev->autoneg) in hclge_cfg_flowctrl()
10865 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); in hclge_cfg_flowctrl()
10867 if (phydev->pause) in hclge_cfg_flowctrl()
10870 if (phydev->asym_pause) in hclge_cfg_flowctrl()
10878 if (phydev->duplex == HCLGE_MAC_HALF) { in hclge_cfg_flowctrl()
10890 struct hclge_dev *hdev = vport->back; in hclge_get_pauseparam()
10891 u8 media_type = hdev->hw.mac.media_type; in hclge_get_pauseparam()
10896 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_get_pauseparam()
10902 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { in hclge_get_pauseparam()
10905 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { in hclge_get_pauseparam()
10908 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { in hclge_get_pauseparam()
10921 hdev->fc_mode_last_time = HCLGE_FC_FULL; in hclge_record_user_pauseparam()
10923 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; in hclge_record_user_pauseparam()
10925 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; in hclge_record_user_pauseparam()
10927 hdev->fc_mode_last_time = HCLGE_FC_NONE; in hclge_record_user_pauseparam()
10929 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; in hclge_record_user_pauseparam()
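hclge_record_user_pauseparam() collapses the two ethtool pause booleans into one flow-control mode. The mapping is just a four-way table; a small sketch with illustrative enum names:

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode pause_to_fc_mode(bool rx_en, bool tx_en)
{
	if (rx_en && tx_en)
		return FC_FULL;
	if (rx_en)
		return FC_RX_PAUSE;
	if (tx_en)
		return FC_TX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       pause_to_fc_mode(true, true),	/* FC_FULL */
	       pause_to_fc_mode(true, false),	/* FC_RX_PAUSE */
	       pause_to_fc_mode(false, true),	/* FC_TX_PAUSE */
	       pause_to_fc_mode(false, false));	/* FC_NONE */
	return 0;
}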
10936 struct hclge_dev *hdev = vport->back; in hclge_set_pauseparam()
10937 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_pauseparam()
10943 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
10944 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); in hclge_set_pauseparam()
10945 return -EOPNOTSUPP; in hclge_set_pauseparam()
10949 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_set_pauseparam()
10950 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
10952 return -EOPNOTSUPP; in hclge_set_pauseparam()
10965 return -EOPNOTSUPP; in hclge_set_pauseparam()
10972 struct hclge_dev *hdev = vport->back; in hclge_get_ksettings_an_result()
10975 *speed = hdev->hw.mac.speed; in hclge_get_ksettings_an_result()
10977 *duplex = hdev->hw.mac.duplex; in hclge_get_ksettings_an_result()
10979 *auto_neg = hdev->hw.mac.autoneg; in hclge_get_ksettings_an_result()
10981 *lane_num = hdev->hw.mac.lane_num; in hclge_get_ksettings_an_result()
10988 struct hclge_dev *hdev = vport->back; in hclge_get_media_type()
10997 *media_type = hdev->hw.mac.media_type; in hclge_get_media_type()
11000 *module_type = hdev->hw.mac.module_type; in hclge_get_media_type()
11007 struct hclge_dev *hdev = vport->back; in hclge_get_mdix_mode()
11008 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mdix_mode()
11055 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_info_show()
11056 struct device *dev = &hdev->pdev->dev; in hclge_info_show()
11060 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); in hclge_info_show()
11061 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); in hclge_info_show()
11062 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); in hclge_info_show()
11063 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); in hclge_info_show()
11064 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); in hclge_info_show()
11065 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); in hclge_info_show()
11066 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); in hclge_info_show()
11067 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); in hclge_info_show()
11068 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); in hclge_info_show()
11070 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); in hclge_info_show()
11072 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); in hclge_info_show()
11074 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable"); in hclge_info_show()
11076 hdev->tx_spare_buf_size); in hclge_info_show()
11084 struct hnae3_client *client = vport->nic.client; in hclge_init_nic_client_instance()
11085 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_nic_client_instance()
11086 int rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_nic_client_instance()
11089 ret = client->ops->init_instance(&vport->nic); in hclge_init_nic_client_instance()
11093 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11094 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_nic_client_instance()
11095 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_nic_client_instance()
11096 ret = -EBUSY; in hclge_init_nic_client_instance()
11103 dev_err(&ae_dev->pdev->dev, in hclge_init_nic_client_instance()
11110 if (netif_msg_drv(&hdev->vport->nic)) in hclge_init_nic_client_instance()
11116 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11117 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_nic_client_instance()
11120 client->ops->uninit_instance(&vport->nic, 0); in hclge_init_nic_client_instance()
11128 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_roce_client_instance()
11133 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclge_init_roce_client_instance()
11134 !hdev->nic_client) in hclge_init_roce_client_instance()
11137 client = hdev->roce_client; in hclge_init_roce_client_instance()
11142 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_roce_client_instance()
11143 ret = client->ops->init_instance(&vport->roce); in hclge_init_roce_client_instance()
11147 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11148 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_roce_client_instance()
11149 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_roce_client_instance()
11150 ret = -EBUSY; in hclge_init_roce_client_instance()
11157 dev_err(&ae_dev->pdev->dev, in hclge_init_roce_client_instance()
11167 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11168 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_roce_client_instance()
11171 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_init_roce_client_instance()
11179 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_client_instance()
11180 struct hclge_vport *vport = &hdev->vport[0]; in hclge_init_client_instance()
11183 switch (client->type) { in hclge_init_client_instance()
11185 hdev->nic_client = client; in hclge_init_client_instance()
11186 vport->nic.client = client; in hclge_init_client_instance()
11198 hdev->roce_client = client; in hclge_init_client_instance()
11199 vport->roce.client = client; in hclge_init_client_instance()
11208 return -EINVAL; in hclge_init_client_instance()
11214 hdev->nic_client = NULL; in hclge_init_client_instance()
11215 vport->nic.client = NULL; in hclge_init_client_instance()
11218 hdev->roce_client = NULL; in hclge_init_client_instance()
11219 vport->roce.client = NULL; in hclge_init_client_instance()
11226 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_client_instance()
11227 struct hclge_vport *vport = &hdev->vport[0]; in hclge_uninit_client_instance()
11229 if (hdev->roce_client) { in hclge_uninit_client_instance()
11230 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11231 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
11234 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_uninit_client_instance()
11235 hdev->roce_client = NULL; in hclge_uninit_client_instance()
11236 vport->roce.client = NULL; in hclge_uninit_client_instance()
11238 if (client->type == HNAE3_CLIENT_ROCE) in hclge_uninit_client_instance()
11240 if (hdev->nic_client && client->ops->uninit_instance) { in hclge_uninit_client_instance()
11241 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11242 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
11245 client->ops->uninit_instance(&vport->nic, 0); in hclge_uninit_client_instance()
11246 hdev->nic_client = NULL; in hclge_uninit_client_instance()
11247 vport->nic.client = NULL; in hclge_uninit_client_instance()
11253 struct pci_dev *pdev = hdev->pdev; in hclge_dev_mem_map()
11254 struct hclge_hw *hw = &hdev->hw; in hclge_dev_mem_map()
11260 hw->hw.mem_base = in hclge_dev_mem_map()
11261 devm_ioremap_wc(&pdev->dev, in hclge_dev_mem_map()
11264 if (!hw->hw.mem_base) { in hclge_dev_mem_map()
11265 dev_err(&pdev->dev, "failed to map device memory\n"); in hclge_dev_mem_map()
11266 return -EFAULT; in hclge_dev_mem_map()
11274 struct pci_dev *pdev = hdev->pdev; in hclge_pci_init()
11280 dev_err(&pdev->dev, "failed to enable PCI device\n"); in hclge_pci_init()
11284 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in hclge_pci_init()
11286 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in hclge_pci_init()
11288 dev_err(&pdev->dev, in hclge_pci_init()
11292 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); in hclge_pci_init()
11297 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); in hclge_pci_init()
11302 hw = &hdev->hw; in hclge_pci_init()
11303 hw->hw.io_base = pcim_iomap(pdev, 2, 0); in hclge_pci_init()
11304 if (!hw->hw.io_base) { in hclge_pci_init()
11305 dev_err(&pdev->dev, "Can't map configuration register space\n"); in hclge_pci_init()
11306 ret = -ENOMEM; in hclge_pci_init()
11314 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); in hclge_pci_init()
11319 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_init()
11330 struct pci_dev *pdev = hdev->pdev; in hclge_pci_uninit()
11332 if (hdev->hw.hw.mem_base) in hclge_pci_uninit()
11333 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); in hclge_pci_uninit()
11335 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_uninit()
11343 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); in hclge_state_init()
11344 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_init()
11345 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11346 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_state_init()
11347 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_state_init()
11348 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11349 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_state_init()
11354 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_uninit()
11355 set_bit(HCLGE_STATE_REMOVING, &hdev->state); in hclge_state_uninit()
11357 if (hdev->reset_timer.function) in hclge_state_uninit()
11358 del_timer_sync(&hdev->reset_timer); in hclge_state_uninit()
11359 if (hdev->service_task.work.func) in hclge_state_uninit()
11360 cancel_delayed_work_sync(&hdev->service_task); in hclge_state_uninit()
11369 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_prepare_general()
11374 down(&hdev->reset_sem); in hclge_reset_prepare_general()
11375 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11376 hdev->reset_type = rst_type; in hclge_reset_prepare_general()
11378 if (!ret && !hdev->reset_pending) in hclge_reset_prepare_general()
11381 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_general()
11383 ret, hdev->reset_pending, retry_cnt); in hclge_reset_prepare_general()
11384 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11385 up(&hdev->reset_sem); in hclge_reset_prepare_general()
11390 hclge_enable_vector(&hdev->misc_vector, false); in hclge_reset_prepare_general()
11391 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_general()
11393 if (hdev->reset_type == HNAE3_FLR_RESET) in hclge_reset_prepare_general()
11394 hdev->rst_stats.flr_rst_cnt++; in hclge_reset_prepare_general()
11399 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_done()
11402 hclge_enable_vector(&hdev->misc_vector, true); in hclge_reset_done()
11406 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); in hclge_reset_done()
11408 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_done()
11409 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_done()
11410 up(&hdev->reset_sem); in hclge_reset_done()
11417 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_clear_resetting_state()
11418 struct hclge_vport *vport = &hdev->vport[i]; in hclge_clear_resetting_state()
11422 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); in hclge_clear_resetting_state()
11424 dev_warn(&hdev->pdev->dev, in hclge_clear_resetting_state()
11426 vport->vport_id, ret); in hclge_clear_resetting_state()
11437 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_clear_hw_resource()
11439 * fail with older firmware. Error value -EOPNOTSUPP can only be in hclge_clear_hw_resource()
11444 if (ret && ret != -EOPNOTSUPP) { in hclge_clear_hw_resource()
11445 dev_err(&hdev->pdev->dev, in hclge_clear_hw_resource()
11454 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_init_rxd_adv_layout()
11455 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); in hclge_init_rxd_adv_layout()
11460 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_uninit_rxd_adv_layout()
11461 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); in hclge_uninit_rxd_adv_layout()
11468 return &vport->back->hw.mac.wol; in hclge_get_wol_info()
11482 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_wol_supported_mode()
11484 dev_err(&hdev->pdev->dev, in hclge_get_wol_supported_mode()
11489 *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode); in hclge_get_wol_supported_mode()
11503 wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode); in hclge_set_wol_cfg()
11504 wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size; in hclge_set_wol_cfg()
11505 memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX); in hclge_set_wol_cfg()
11507 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_wol_cfg()
11509 dev_err(&hdev->pdev->dev, in hclge_set_wol_cfg()
11517 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_update_wol()
11519 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_update_wol()
11527 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_init_wol()
11530 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_init_wol()
11535 &wol_info->wol_support_mode); in hclge_init_wol()
11537 wol_info->wol_support_mode = 0; in hclge_init_wol()
11549 wol->supported = wol_info->wol_support_mode; in hclge_get_wol()
11550 wol->wolopts = wol_info->wol_current_mode; in hclge_get_wol()
11551 if (wol_info->wol_current_mode & WAKE_MAGICSECURE) in hclge_get_wol()
11552 memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX); in hclge_get_wol()
11563 wol_mode = wol->wolopts; in hclge_set_wol()
11564 if (wol_mode & ~wol_info->wol_support_mode) in hclge_set_wol()
11565 return -EINVAL; in hclge_set_wol()
11567 wol_info->wol_current_mode = wol_mode; in hclge_set_wol()
11569 memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX); in hclge_set_wol()
11570 wol_info->wol_sopass_size = SOPASS_MAX; in hclge_set_wol()
11572 wol_info->wol_sopass_size = 0; in hclge_set_wol()
11575 ret = hclge_set_wol_cfg(vport->back, wol_info); in hclge_set_wol()
11577 wol_info->wol_current_mode = 0; in hclge_set_wol()
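hclge_set_wol() rejects any requested wake mode outside the firmware-reported mask and keeps the SecureOn password only when WAKE_MAGICSECURE is requested. A compact sketch of that validation, with made-up flag values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WAKE_MAGIC		0x20	/* illustrative bit values */
#define WAKE_MAGICSECURE	0x40
#define SOPASS_MAX		6

struct wol_state {
	uint32_t supported;
	uint32_t current_mode;
	uint8_t sopass[SOPASS_MAX];
	uint8_t sopass_size;
};

static int set_wol(struct wol_state *w, uint32_t wolopts, const uint8_t *sopass)
{
	if (wolopts & ~w->supported)
		return -EINVAL;	/* mode not supported by firmware */

	w->current_mode = wolopts;
	if (wolopts & WAKE_MAGICSECURE) {
		memcpy(w->sopass, sopass, SOPASS_MAX);
		w->sopass_size = SOPASS_MAX;
	} else {
		w->sopass_size = 0;
	}
	return 0;
}

int main(void)
{
	struct wol_state w = { .supported = WAKE_MAGIC };
	uint8_t pass[SOPASS_MAX] = { 0 };

	/* prints -22 (-EINVAL): MAGICSECURE is not in the supported mask */
	printf("%d\n", set_wol(&w, WAKE_MAGICSECURE, pass));
	return 0;
}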
11584 struct pci_dev *pdev = ae_dev->pdev; in hclge_init_ae_dev()
11588 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclge_init_ae_dev()
11590 return -ENOMEM; in hclge_init_ae_dev()
11592 hdev->pdev = pdev; in hclge_init_ae_dev()
11593 hdev->ae_dev = ae_dev; in hclge_init_ae_dev()
11594 hdev->reset_type = HNAE3_NONE_RESET; in hclge_init_ae_dev()
11595 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_init_ae_dev()
11596 ae_dev->priv = hdev; in hclge_init_ae_dev()
11599 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; in hclge_init_ae_dev()
11601 mutex_init(&hdev->vport_lock); in hclge_init_ae_dev()
11602 spin_lock_init(&hdev->fd_rule_lock); in hclge_init_ae_dev()
11603 sema_init(&hdev->reset_sem, 1); in hclge_init_ae_dev()
11614 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); in hclge_init_ae_dev()
11619 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_init_ae_dev()
11620 true, hdev->reset_pending); in hclge_init_ae_dev()
11634 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n", in hclge_init_ae_dev()
11641 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); in hclge_init_ae_dev()
11647 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); in hclge_init_ae_dev()
11657 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); in hclge_init_ae_dev()
11669 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { in hclge_init_ae_dev()
11670 clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); in hclge_init_ae_dev()
11686 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); in hclge_init_ae_dev()
11692 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); in hclge_init_ae_dev()
11702 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11708 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11712 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, in hclge_init_ae_dev()
11713 &hdev->rss_cfg); in hclge_init_ae_dev()
11715 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); in hclge_init_ae_dev()
11721 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11727 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); in hclge_init_ae_dev()
11733 dev_err(&pdev->dev, in hclge_init_ae_dev()
11746 INIT_KFIFO(hdev->mac_tnl_log); in hclge_init_ae_dev()
11750 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); in hclge_init_ae_dev()
11751 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); in hclge_init_ae_dev()
11765 if (ae_dev->hw_err_reset_req) { in hclge_init_ae_dev()
11769 &ae_dev->hw_err_reset_req); in hclge_init_ae_dev()
11771 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_init_ae_dev()
11777 hclge_enable_vector(&hdev->misc_vector, true); in hclge_init_ae_dev()
11781 dev_warn(&pdev->dev, in hclge_init_ae_dev()
11785 hdev->last_reset_time = jiffies; in hclge_init_ae_dev()
11787 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", in hclge_init_ae_dev()
11795 if (hdev->hw.mac.phydev) in hclge_init_ae_dev()
11796 mdiobus_unregister(hdev->hw.mac.mdio_bus); in hclge_init_ae_dev()
11802 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_init_ae_dev()
11806 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_init_ae_dev()
11810 mutex_destroy(&hdev->vport_lock); in hclge_init_ae_dev()
11816 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); in hclge_stats_clear()
11817 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); in hclge_stats_clear()
11839 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11847 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11858 struct hclge_dev *hdev = vport->back; in hclge_set_vf_spoofchk()
11862 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_spoofchk()
11863 return -EOPNOTSUPP; in hclge_set_vf_spoofchk()
11867 return -EINVAL; in hclge_set_vf_spoofchk()
11869 if (vport->vf_info.spoofchk == new_spoofchk) in hclge_set_vf_spoofchk()
11872 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) in hclge_set_vf_spoofchk()
11873 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
11877 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
11881 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); in hclge_set_vf_spoofchk()
11885 vport->vf_info.spoofchk = new_spoofchk; in hclge_set_vf_spoofchk()
11891 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_spoofchk()
11895 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_reset_vport_spoofchk()
11899 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_spoofchk()
11900 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, in hclge_reset_vport_spoofchk()
11901 vport->vf_info.spoofchk); in hclge_reset_vport_spoofchk()
11914 struct hclge_dev *hdev = vport->back; in hclge_set_vf_trust()
11919 return -EINVAL; in hclge_set_vf_trust()
11921 if (vport->vf_info.trusted == new_trusted) in hclge_set_vf_trust()
11924 vport->vf_info.trusted = new_trusted; in hclge_set_vf_trust()
11925 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_set_vf_trust()
11937 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_reset_vf_rate()
11938 struct hclge_vport *vport = &hdev->vport[vf]; in hclge_reset_vf_rate()
11940 vport->vf_info.max_tx_rate = 0; in hclge_reset_vf_rate()
11941 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); in hclge_reset_vf_rate()
11943 dev_err(&hdev->pdev->dev, in hclge_reset_vf_rate()
11945 vf - HCLGE_VF_VPORT_START_NUM, ret); in hclge_reset_vf_rate()
11953 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { in hclge_vf_rate_param_check()
11954 dev_err(&hdev->pdev->dev, in hclge_vf_rate_param_check()
11956 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); in hclge_vf_rate_param_check()
11957 return -EINVAL; in hclge_vf_rate_param_check()
11967 struct hclge_dev *hdev = vport->back; in hclge_set_vf_rate()
11976 return -EINVAL; in hclge_set_vf_rate()
11978 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) in hclge_set_vf_rate()
11985 vport->vf_info.max_tx_rate = max_tx_rate; in hclge_set_vf_rate()
11992 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_resume_vf_rate()
11998 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { in hclge_resume_vf_rate()
12001 return -EINVAL; in hclge_resume_vf_rate()
12006 if (!vport->vf_info.max_tx_rate) in hclge_resume_vf_rate()
12010 vport->vf_info.max_tx_rate, true); in hclge_resume_vf_rate()
12012 dev_err(&hdev->pdev->dev, in hclge_resume_vf_rate()
12014 vf, vport->vf_info.max_tx_rate, ret); in hclge_resume_vf_rate()
12024 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_state()
12027 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_state()
12028 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_reset_vport_state()
12035 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_ae_dev()
12036 struct pci_dev *pdev = ae_dev->pdev; in hclge_reset_ae_dev()
12039 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_reset_ae_dev()
12045 if (hdev->reset_type == HNAE3_IMP_RESET || in hclge_reset_ae_dev()
12046 hdev->reset_type == HNAE3_GLOBAL_RESET) { in hclge_reset_ae_dev()
12047 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); in hclge_reset_ae_dev()
12048 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); in hclge_reset_ae_dev()
12049 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); in hclge_reset_ae_dev()
12053 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_reset_ae_dev()
12054 true, hdev->reset_pending); in hclge_reset_ae_dev()
12056 dev_err(&pdev->dev, "Cmd queue init failed\n"); in hclge_reset_ae_dev()
12062 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); in hclge_reset_ae_dev()
12068 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); in hclge_reset_ae_dev()
12074 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", in hclge_reset_ae_dev()
12081 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12091 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12097 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12103 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); in hclge_reset_ae_dev()
12109 dev_err(&pdev->dev, in hclge_reset_ae_dev()
12116 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); in hclge_reset_ae_dev()
12130 /* Re-enable the hw error interrupts because in hclge_reset_ae_dev()
12135 dev_err(&pdev->dev, in hclge_reset_ae_dev()
12136 "fail(%d) to re-enable NIC hw error interrupts\n", in hclge_reset_ae_dev()
12141 if (hdev->roce_client) { in hclge_reset_ae_dev()
12144 dev_err(&pdev->dev, in hclge_reset_ae_dev()
12145 "fail(%d) to re-enable roce ras interrupts\n", in hclge_reset_ae_dev()
12164 dev_warn(&pdev->dev, in hclge_reset_ae_dev()
12167 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", in hclge_reset_ae_dev()
12175 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_ae_dev()
12176 struct hclge_mac *mac = &hdev->hw.mac; in hclge_uninit_ae_dev()
12186 if (mac->phydev) in hclge_uninit_ae_dev()
12187 mdiobus_unregister(mac->mdio_bus); in hclge_uninit_ae_dev()
12190 hclge_enable_vector(&hdev->misc_vector, false); in hclge_uninit_ae_dev()
12191 synchronize_irq(hdev->misc_vector.vector_irq); in hclge_uninit_ae_dev()
12198 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_uninit_ae_dev()
12203 mutex_destroy(&hdev->vport_lock); in hclge_uninit_ae_dev()
12204 ae_dev->priv = NULL; in hclge_uninit_ae_dev()
12210 struct hclge_dev *hdev = vport->back; in hclge_get_max_channels()
12212 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); in hclge_get_max_channels()
12218 ch->max_combined = hclge_get_max_channels(handle); in hclge_get_channels()
12219 ch->other_count = 1; in hclge_get_channels()
12220 ch->max_other = 1; in hclge_get_channels()
12221 ch->combined_count = handle->kinfo.rss_size; in hclge_get_channels()
12228 struct hclge_dev *hdev = vport->back; in hclge_get_tqps_and_rss_info()
12230 *alloc_tqps = vport->alloc_tqps; in hclge_get_tqps_and_rss_info()
12231 *max_rss_size = hdev->pf_rss_size_max; in hclge_get_tqps_and_rss_info()
12238 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tc_mode_cfg()
12244 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); in hclge_set_rss_tc_mode_cfg()
12250 if (!(hdev->hw_tc_map & BIT(i))) in hclge_set_rss_tc_mode_cfg()
12255 tc_offset[i] = vport->nic.kinfo.rss_size * i; in hclge_set_rss_tc_mode_cfg()
12258 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_set_rss_tc_mode_cfg()
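hclge_set_rss_tc_mode_cfg() derives per-TC validity and queue offsets from the TC bitmap and the vport's rss_size after rounding it up to a power of two. A sketch of those array computations, assuming a 4-TC map and an rss_size of 6:

#include <stdio.h>

#define MAX_TC 8

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int hw_tc_map = 0x0f;	/* assumed: TCs 0-3 enabled */
	unsigned int rss_size = 6;	/* assumed per-vport rss_size */
	unsigned int roundup_size = roundup_pow2(rss_size);	/* 8 */
	unsigned int tc_valid[MAX_TC], tc_offset[MAX_TC];

	for (unsigned int i = 0; i < MAX_TC; i++) {
		tc_valid[i] = !!(hw_tc_map & (1U << i));
		tc_offset[i] = rss_size * i;	/* queues of TC i start here */
	}

	printf("roundup=%u tc3: valid=%u offset=%u\n",
	       roundup_size, tc_valid[3], tc_offset[3]);	/* 8 1 18 */
	return 0;
}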
12265 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hclge_set_channels()
12267 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_set_channels()
12268 struct hclge_dev *hdev = vport->back; in hclge_set_channels()
12269 u16 cur_rss_size = kinfo->rss_size; in hclge_set_channels()
12270 u16 cur_tqps = kinfo->num_tqps; in hclge_set_channels()
12275 kinfo->req_rss_size = new_tqps_num; in hclge_set_channels()
12279 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); in hclge_set_channels()
12292 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), in hclge_set_channels()
12295 return -ENOMEM; in hclge_set_channels()
12297 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) in hclge_set_channels()
12298 rss_indir[i] = i % kinfo->rss_size; in hclge_set_channels()
12302 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclge_set_channels()
12309 dev_info(&hdev->pdev->dev, in hclge_set_channels()
12311 cur_rss_size, kinfo->rss_size, in hclge_set_channels()
12312 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); in hclge_set_channels()
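After a channel-count change, hclge_set_channels() refills the RSS indirection table by striping entries over the new rss_size (rss_indir[i] = i % rss_size). A tiny sketch of that refill with an assumed 16-entry table:

#include <stdio.h>

#define IND_TBL_SIZE 16	/* assumed; the real size comes from dev_specs */

int main(void)
{
	unsigned int rss_size = 4;	/* new number of RSS queues */
	unsigned int rss_indir[IND_TBL_SIZE];

	/* Spread table entries evenly over queues 0..rss_size-1. */
	for (unsigned int i = 0; i < IND_TBL_SIZE; i++)
		rss_indir[i] = i % rss_size;

	for (unsigned int i = 0; i < IND_TBL_SIZE; i++)
		printf("%u ", rss_indir[i]);	/* 0 1 2 3 0 1 2 3 ... */
	printf("\n");
	return 0;
}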
12326 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, in hclge_set_led_status()
12329 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_led_status()
12331 dev_err(&hdev->pdev->dev, in hclge_set_led_status()
12347 struct hclge_dev *hdev = vport->back; in hclge_set_led_id()
12355 return -EINVAL; in hclge_set_led_id()
12361 unsigned long *advertising) in hclge_get_link_mode() argument
12365 struct hclge_dev *hdev = vport->back; in hclge_get_link_mode()
12369 supported[idx] = hdev->hw.mac.supported[idx]; in hclge_get_link_mode()
12370 advertising[idx] = hdev->hw.mac.advertising[idx]; in hclge_get_link_mode()
12377 struct hclge_dev *hdev = vport->back; in hclge_gro_en()
12378 bool gro_en_old = hdev->gro_en; in hclge_gro_en()
12381 hdev->gro_en = enable; in hclge_gro_en()
12384 hdev->gro_en = gro_en_old; in hclge_gro_en()
12391 struct hnae3_handle *handle = &vport->nic; in hclge_sync_vport_promisc_mode()
12392 struct hclge_dev *hdev = vport->back; in hclge_sync_vport_promisc_mode()
12399 if (vport->last_promisc_flags != vport->overflow_promisc_flags) { in hclge_sync_vport_promisc_mode()
12400 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_sync_vport_promisc_mode()
12401 vport->last_promisc_flags = vport->overflow_promisc_flags; in hclge_sync_vport_promisc_mode()
12405 &vport->state)) in hclge_sync_vport_promisc_mode()
12409 if (!vport->vport_id) { in hclge_sync_vport_promisc_mode()
12410 tmp_flags = handle->netdev_flags | vport->last_promisc_flags; in hclge_sync_vport_promisc_mode()
12415 &vport->state); in hclge_sync_vport_promisc_mode()
12418 &vport->state); in hclge_sync_vport_promisc_mode()
12423 if (vport->vf_info.trusted) { in hclge_sync_vport_promisc_mode()
12424 uc_en = vport->vf_info.request_uc_en > 0 || in hclge_sync_vport_promisc_mode()
12425 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; in hclge_sync_vport_promisc_mode()
12426 mc_en = vport->vf_info.request_mc_en > 0 || in hclge_sync_vport_promisc_mode()
12427 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; in hclge_sync_vport_promisc_mode()
12429 bc_en = vport->vf_info.request_bc_en > 0; in hclge_sync_vport_promisc_mode()
12431 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, in hclge_sync_vport_promisc_mode()
12434 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); in hclge_sync_vport_promisc_mode()
12448 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_promisc_mode()
12449 vport = &hdev->vport[i]; in hclge_sync_promisc_mode()
12464 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_module_existed()
12466 dev_err(&hdev->pdev->dev, in hclge_module_existed()
12495 if (i < HCLGE_SFP_INFO_CMD_NUM - 1) in hclge_get_sfp_eeprom_info()
12501 sfp_info_bd0->offset = cpu_to_le16((u16)offset); in hclge_get_sfp_eeprom_info()
12503 sfp_info_bd0->read_len = cpu_to_le16(read_len); in hclge_get_sfp_eeprom_info()
12505 ret = hclge_cmd_send(&hdev->hw, desc, i); in hclge_get_sfp_eeprom_info()
12507 dev_err(&hdev->pdev->dev, in hclge_get_sfp_eeprom_info()
12514 memcpy(data, sfp_info_bd0->data, copy_len); in hclge_get_sfp_eeprom_info()
12522 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); in hclge_get_sfp_eeprom_info()
12534 struct hclge_dev *hdev = vport->back; in hclge_get_module_eeprom()
12538 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) in hclge_get_module_eeprom()
12539 return -EOPNOTSUPP; in hclge_get_module_eeprom()
12542 return -ENXIO; in hclge_get_module_eeprom()
12547 len - read_len, in hclge_get_module_eeprom()
12550 return -EIO; in hclge_get_module_eeprom()
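hclge_get_module_eeprom() reads the SFP EEPROM in bounded chunks, advancing the offset until the requested length has been copied (copy_len = min(len - read_len, chunk)). A generic chunked-read loop of the same shape, with a dummy buffer standing in for the firmware command:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_LEN 24	/* illustrative per-command read size */

static const uint8_t eeprom[256] = { [0] = 0x03, [1] = 0x04 };	/* fake module */

/* Stand-in for one firmware read command; returns bytes actually copied. */
static uint16_t read_chunk(uint32_t offset, uint8_t *dst, uint16_t want)
{
	uint16_t n = want > CHUNK_LEN ? CHUNK_LEN : want;

	memcpy(dst, &eeprom[offset], n);
	return n;
}

static void read_eeprom(uint32_t offset, uint32_t len, uint8_t *data)
{
	uint32_t read_len = 0;

	while (read_len < len) {
		uint16_t n = read_chunk(offset + read_len, data + read_len,
					(uint16_t)(len - read_len));
		read_len += n;
	}
}

int main(void)
{
	uint8_t buf[100];

	read_eeprom(0, sizeof(buf), buf);	/* takes several chunks */
	printf("%02x %02x\n", buf[0], buf[1]);	/* 03 04 */
	return 0;
}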
12562 struct hclge_dev *hdev = vport->back; in hclge_get_link_diagnosis_info()
12566 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) in hclge_get_link_diagnosis_info()
12567 return -EOPNOTSUPP; in hclge_get_link_diagnosis_info()
12570 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_link_diagnosis_info()
12572 dev_err(&hdev->pdev->dev, in hclge_get_link_diagnosis_info()
12586 struct hclge_dev *hdev = vport->back; in hclge_clear_vport_vf_info()
12590 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); in hclge_clear_vport_vf_info()
12591 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_clear_vport_vf_info()
12592 vport->need_notify = 0; in hclge_clear_vport_vf_info()
12593 vport->mps = 0; in hclge_clear_vport_vf_info()
12598 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12609 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12613 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); in hclge_clear_vport_vf_info()
12615 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12619 memset(&vport->vf_info, 0, sizeof(vport->vf_info)); in hclge_clear_vport_vf_info()
12624 struct hclge_dev *hdev = ae_dev->priv; in hclge_clean_vport_config()
12629 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_clean_vport_config()
12641 return -EINVAL; in hclge_get_dscp_prio()
12644 *tc_mode = vport->nic.kinfo.tc_map_mode; in hclge_get_dscp_prio()
12646 *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : in hclge_get_dscp_prio()
12647 vport->nic.kinfo.dscp_prio[dscp]; in hclge_get_dscp_prio()
12772 return -ENOMEM; in hclge_init()