
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
424 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_defective()
431 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); in hclge_mac_update_stats_defective()
433 dev_err(&hdev->pdev->dev, in hclge_mac_update_stats_defective()
461 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_complete()
472 return -ENOMEM; in hclge_mac_update_stats_complete()
475 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); in hclge_mac_update_stats_complete()
511 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_mac_query_reg_num()
518 *desc_num = 1 + ((reg_num - 3) >> 2) + in hclge_mac_query_reg_num()
519 (u32)(((reg_num - 3) & 0x3) ? 1 : 0); in hclge_mac_query_reg_num()
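Editor's note: the descriptor count above is a ceiling division in disguise. Reading the expression, the first command descriptor returns three statistics registers and each additional descriptor returns four. A minimal userspace sketch of the same arithmetic (function name and the 3/4-per-descriptor reading are mine, inferred from the expression; reg_num >= 3 is assumed):

#include <assert.h>
#include <stdint.h>

/* Equivalent to: 1 + ((reg_num - 3) >> 2) + (((reg_num - 3) & 0x3) ? 1 : 0),
 * i.e. one descriptor for the first 3 registers plus ceil((reg_num - 3) / 4). */
static uint32_t stats_desc_num(uint32_t reg_num)
{
    return 1 + (reg_num - 3 + 3) / 4;
}

int main(void)
{
    assert(stats_desc_num(3) == 1);  /* everything fits in the first descriptor */
    assert(stats_desc_num(4) == 2);  /* one extra register needs one extra descriptor */
    assert(stats_desc_num(12) == 4); /* 3 + 4 + 4 + 1 registers */
    return 0;
}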
534 else if (ret == -EOPNOTSUPP) in hclge_mac_update_stats()
537 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); in hclge_mac_update_stats()
544 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hclge_tqps_update_stats()
546 struct hclge_dev *hdev = vport->back; in hclge_tqps_update_stats()
552 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_tqps_update_stats()
553 queue = handle->kinfo.tqp[i]; in hclge_tqps_update_stats()
559 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); in hclge_tqps_update_stats()
560 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_tqps_update_stats()
562 dev_err(&hdev->pdev->dev, in hclge_tqps_update_stats()
567 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += in hclge_tqps_update_stats()
571 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_tqps_update_stats()
572 queue = handle->kinfo.tqp[i]; in hclge_tqps_update_stats()
579 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); in hclge_tqps_update_stats()
580 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_tqps_update_stats()
582 dev_err(&hdev->pdev->dev, in hclge_tqps_update_stats()
587 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += in hclge_tqps_update_stats()
596 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hclge_tqps_get_stats()
601 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_tqps_get_stats()
602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); in hclge_tqps_get_stats()
603 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; in hclge_tqps_get_stats()
606 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_tqps_get_stats()
607 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); in hclge_tqps_get_stats()
608 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; in hclge_tqps_get_stats()
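Editor's note: hclge_tqps_get_stats fills one flat u64 buffer in two passes, all TX packet counters first and then all RX counters, matching the order in which the strings are emitted. A reduced sketch of that layout (struct and field names are illustrative stand-ins for the rcb_*_ring_pktnum_rcd counters):

#include <stddef.h>
#include <stdint.h>

struct tqp_stats { uint64_t tx_pkts, rx_pkts; };

/* Pack [tx0..txN-1, rx0..rxN-1] into buff and return the next free slot,
 * mirroring the two-pass loop in hclge_tqps_get_stats(). */
static uint64_t *pack_tqp_stats(const struct tqp_stats *tqps, size_t n,
                                uint64_t *buff)
{
    for (size_t i = 0; i < n; i++)
        *buff++ = tqps[i].tx_pkts;
    for (size_t i = 0; i < n; i++)
        *buff++ = tqps[i].rx_pkts;
    return buff;
}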
616 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hclge_tqps_get_sset_count()
619 return kinfo->num_tqps * (2); in hclge_tqps_get_sset_count()
624 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hclge_tqps_get_strings()
628 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_tqps_get_strings()
629 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], in hclge_tqps_get_strings()
632 tqp->index); in hclge_tqps_get_strings()
636 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_tqps_get_strings()
637 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], in hclge_tqps_get_strings()
640 tqp->index); in hclge_tqps_get_strings()
683 handle = &hdev->vport[0].nic; in hclge_update_stats_for_all()
684 if (handle->client) { in hclge_update_stats_for_all()
687 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
695 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
703 struct hclge_dev *hdev = vport->back; in hclge_update_stats()
706 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) in hclge_update_stats()
711 dev_err(&hdev->pdev->dev, in hclge_update_stats()
717 dev_err(&hdev->pdev->dev, in hclge_update_stats()
721 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); in hclge_update_stats()
732 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count()
742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); in hclge_get_sset_count()
743 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || in hclge_get_sset_count()
744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || in hclge_get_sset_count()
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || in hclge_get_sset_count()
746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { in hclge_get_sset_count()
748 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; in hclge_get_sset_count()
752 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; in hclge_get_sset_count()
753 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; in hclge_get_sset_count()
755 if (hdev->hw.mac.phydev) { in hclge_get_sset_count()
757 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; in hclge_get_sset_count()
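Editor's note: the sset-count path above rebuilds the loopback capabilities from scratch on every call: clear all test flags, re-add APP loopback for V2+ devices or 10M/100M/1G MACs, advertise both serdes loopbacks, and add PHY loopback only when a phydev is attached. A compressed sketch of that rebuild (flag values are invented placeholders for the HNAE3_SUPPORT_* bits):

#include <stdint.h>

enum {
    LB_APP             = 1u << 0,
    LB_SERDES_SERIAL   = 1u << 1,
    LB_SERDES_PARALLEL = 1u << 2,
    LB_PHY             = 1u << 3,
    LB_ALL = LB_APP | LB_SERDES_SERIAL | LB_SERDES_PARALLEL | LB_PHY,
};

static uint32_t rebuild_loopback_flags(uint32_t flags, int dev_v2_or_newer,
                                       int low_speed_mac, int has_phydev)
{
    flags &= ~LB_ALL;                     /* start from a clean slate */
    if (dev_v2_or_newer || low_speed_mac)
        flags |= LB_APP;
    flags |= LB_SERDES_SERIAL | LB_SERDES_PARALLEL;
    if (has_phydev)
        flags |= LB_PHY;
    return flags;
}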
780 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { in hclge_get_strings()
785 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { in hclge_get_strings()
790 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { in hclge_get_strings()
796 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { in hclge_get_strings()
807 struct hclge_dev *hdev = vport->back; in hclge_get_stats()
810 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string, in hclge_get_stats()
819 struct hclge_dev *hdev = vport->back; in hclge_get_mac_stat()
823 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; in hclge_get_mac_stat()
824 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; in hclge_get_mac_stat()
832 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) in hclge_parse_func_status()
833 return -EINVAL; in hclge_parse_func_status()
836 if (status->pf_state & HCLGE_PF_STATE_MAIN) in hclge_parse_func_status()
837 hdev->flag |= HCLGE_FLAG_MAIN; in hclge_parse_func_status()
839 hdev->flag &= ~HCLGE_FLAG_MAIN; in hclge_parse_func_status()
841 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; in hclge_parse_func_status()
858 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_function_status()
860 dev_err(&hdev->pdev->dev, in hclge_query_function_status()
866 if (req->pf_state) in hclge_query_function_status()
881 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_pf_resource()
883 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
889 hdev->num_tqps = le16_to_cpu(req->tqp_num); in hclge_query_pf_resource()
890 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
892 if (req->tx_buf_size) in hclge_query_pf_resource()
893 hdev->tx_buf_size = in hclge_query_pf_resource()
894 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
896 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; in hclge_query_pf_resource()
898 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
900 if (req->dv_buf_size) in hclge_query_pf_resource()
901 hdev->dv_buf_size = in hclge_query_pf_resource()
902 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
904 hdev->dv_buf_size = HCLGE_DEFAULT_DV; in hclge_query_pf_resource()
906 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
909 hdev->roce_base_msix_offset = in hclge_query_pf_resource()
910 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), in hclge_query_pf_resource()
912 hdev->num_roce_msi = in hclge_query_pf_resource()
913 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number), in hclge_query_pf_resource()
917 hdev->num_nic_msi = hdev->num_roce_msi; in hclge_query_pf_resource()
922 hdev->num_msi = hdev->num_roce_msi + in hclge_query_pf_resource()
923 hdev->roce_base_msix_offset; in hclge_query_pf_resource()
925 hdev->num_msi = in hclge_query_pf_resource()
926 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number), in hclge_query_pf_resource()
929 hdev->num_nic_msi = hdev->num_msi; in hclge_query_pf_resource()
932 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { in hclge_query_pf_resource()
933 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
935 hdev->num_nic_msi); in hclge_query_pf_resource()
936 return -EINVAL; in hclge_query_pf_resource()
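Editor's note: the PF resource query keeps two interrupt budgets. With RoCE enabled, the NIC is capped at the RoCE vector count and the total is that count plus the NIC-only vectors below roce_base_msix_offset; without RoCE, both totals come straight from firmware. A hedged sketch of the bookkeeping (the minimum-vector constant and -1 error return are stand-ins):

#define MIN_VECTOR_NUM 2   /* stand-in for HNAE3_MIN_VECTOR_NUM */

struct msi_budget { int num_msi, num_nic_msi, num_roce_msi, roce_offset; };

/* Mirrors the two branches in hclge_query_pf_resource(). */
static int compute_msi_budget(struct msi_budget *b, int roce_en,
                              int fw_pf_vectors, int fw_roce_vectors,
                              int roce_offset)
{
    if (roce_en) {
        b->num_roce_msi = fw_roce_vectors;
        b->roce_offset = roce_offset;
        b->num_nic_msi = b->num_roce_msi;            /* NIC shares the RoCE count */
        b->num_msi = b->num_roce_msi + roce_offset;  /* plus the NIC-only head */
    } else {
        b->num_msi = fw_pf_vectors;
        b->num_nic_msi = b->num_msi;
    }
    return (b->num_nic_msi < MIN_VECTOR_NUM) ? -1 : 0;
}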
973 return -EINVAL; in hclge_parse_speed()
982 struct hclge_dev *hdev = vport->back; in hclge_check_port_speed()
983 u32 speed_ability = hdev->hw.mac.speed_ability; in hclge_check_port_speed()
1015 return -EINVAL; in hclge_check_port_speed()
1021 return -EINVAL; in hclge_check_port_speed()
1028 mac->supported); in hclge_convert_setting_sr()
1031 mac->supported); in hclge_convert_setting_sr()
1034 mac->supported); in hclge_convert_setting_sr()
1037 mac->supported); in hclge_convert_setting_sr()
1040 mac->supported); in hclge_convert_setting_sr()
1043 mac->supported); in hclge_convert_setting_sr()
1050 mac->supported); in hclge_convert_setting_lr()
1053 mac->supported); in hclge_convert_setting_lr()
1056 mac->supported); in hclge_convert_setting_lr()
1059 mac->supported); in hclge_convert_setting_lr()
1062 mac->supported); in hclge_convert_setting_lr()
1066 mac->supported); in hclge_convert_setting_lr()
1073 mac->supported); in hclge_convert_setting_cr()
1076 mac->supported); in hclge_convert_setting_cr()
1079 mac->supported); in hclge_convert_setting_cr()
1082 mac->supported); in hclge_convert_setting_cr()
1085 mac->supported); in hclge_convert_setting_cr()
1088 mac->supported); in hclge_convert_setting_cr()
1095 mac->supported); in hclge_convert_setting_kr()
1098 mac->supported); in hclge_convert_setting_kr()
1101 mac->supported); in hclge_convert_setting_kr()
1104 mac->supported); in hclge_convert_setting_kr()
1107 mac->supported); in hclge_convert_setting_kr()
1110 mac->supported); in hclge_convert_setting_kr()
1113 mac->supported); in hclge_convert_setting_kr()
1118 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); in hclge_convert_setting_fec()
1119 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); in hclge_convert_setting_fec()
1121 switch (mac->speed) { in hclge_convert_setting_fec()
1125 mac->supported); in hclge_convert_setting_fec()
1126 mac->fec_ability = in hclge_convert_setting_fec()
1132 mac->supported); in hclge_convert_setting_fec()
1133 mac->fec_ability = in hclge_convert_setting_fec()
1139 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); in hclge_convert_setting_fec()
1140 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO); in hclge_convert_setting_fec()
1143 mac->fec_ability = 0; in hclge_convert_setting_fec()
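Editor's note: hclge_convert_setting_fec derives the advertised FEC modes from the current speed: BASE-R for the lower speeds, both BASE-R and RS in the middle, RS only at the top, and nothing otherwise. A sketch of that mapping (enum values are placeholders for the HNAE3_FEC_* bits, and the pairing of speeds to cases follows the mainline driver of this era rather than the fragments alone):

#include <stdint.h>

enum { FEC_BASER = 1 << 0, FEC_RS = 1 << 1, FEC_AUTO = 1 << 2 };

static uint32_t fec_ability_for_speed(int speed_mbps)
{
    switch (speed_mbps) {
    case 10000:
    case 40000:
        return FEC_BASER | FEC_AUTO;            /* BASE-R capable speeds */
    case 25000:
    case 50000:
        return FEC_BASER | FEC_RS | FEC_AUTO;
    case 100000:
    case 200000:
        return FEC_RS | FEC_AUTO;               /* RS only at the top end */
    default:
        return 0;                               /* no FEC ability */
    }
}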
1151 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_fiber_link_mode()
1155 mac->supported); in hclge_parse_fiber_link_mode()
1163 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1164 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1165 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_parse_fiber_link_mode()
1171 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_backplane_link_mode()
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1177 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1178 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); in hclge_parse_backplane_link_mode()
1184 unsigned long *supported = hdev->hw.mac.supported; in hclge_parse_copper_link_mode()
1214 u8 media_type = hdev->hw.mac.media_type; in hclge_parse_link_mode()
1269 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), in hclge_parse_cfg()
1272 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), in hclge_parse_cfg()
1274 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), in hclge_parse_cfg()
1278 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1281 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1284 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1288 mac_addr_tmp = __le32_to_cpu(req->param[2]); in hclge_parse_cfg()
1289 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1295 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1298 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), in hclge_parse_cfg()
1303 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; in hclge_parse_cfg()
1306 cfg->numa_node_map = __le32_to_cpu(req->param[0]); in hclge_parse_cfg()
1308 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1311 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1314 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT; in hclge_parse_cfg()
1316 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), in hclge_parse_cfg()
1319 if (!cfg->umv_space) in hclge_parse_cfg()
1320 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; in hclge_parse_cfg()
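Editor's note: every cfg field above comes out of a little-endian parameter word via hnae3_get_field(), which is plain mask-and-shift; the MAC address additionally spans param[2] (low 32 bits) and part of param[3]. A standalone sketch of both idioms (the example mask/shift layout is invented):

#include <stdint.h>

/* hnae3_get_field(origin, mask, shift) expands to ((origin & mask) >> shift) */
#define GET_FIELD(origin, mask, shift) (((origin) & (mask)) >> (shift))

#define CFG_TC_NUM_M 0x0000ff00u   /* invented layout for illustration */
#define CFG_TC_NUM_S 8u

static uint32_t parse_tc_num(uint32_t param0)
{
    return GET_FIELD(param0, CFG_TC_NUM_M, CFG_TC_NUM_S);
}

/* Rebuild a 6-byte MAC from the low word plus the 16 high bits, then unpack
 * it byte-by-byte as the (mac_addr_tmp >> (8 * i)) & 0xff loop does. */
static void parse_mac(uint32_t lo, uint32_t hi16, uint8_t mac[6])
{
    uint64_t v = ((uint64_t)hi16 << 32) | lo;

    for (int i = 0; i < 6; i++)
        mac[i] = (v >> (8 * i)) & 0xff;
}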
1345 req->offset = cpu_to_le32(offset); in hclge_get_cfg()
1348 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); in hclge_get_cfg()
1350 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); in hclge_get_cfg()
1363 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_set_default_dev_specs()
1365 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; in hclge_set_default_dev_specs()
1366 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; in hclge_set_default_dev_specs()
1367 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE; in hclge_set_default_dev_specs()
1368 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; in hclge_set_default_dev_specs()
1374 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_parse_dev_specs()
1379 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; in hclge_parse_dev_specs()
1380 ae_dev->dev_specs.rss_ind_tbl_size = in hclge_parse_dev_specs()
1381 le16_to_cpu(req0->rss_ind_tbl_size); in hclge_parse_dev_specs()
1382 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); in hclge_parse_dev_specs()
1383 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); in hclge_parse_dev_specs()
1388 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; in hclge_check_dev_specs()
1390 if (!dev_specs->max_non_tso_bd_num) in hclge_check_dev_specs()
1391 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; in hclge_check_dev_specs()
1392 if (!dev_specs->rss_ind_tbl_size) in hclge_check_dev_specs()
1393 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; in hclge_check_dev_specs()
1394 if (!dev_specs->rss_key_size) in hclge_check_dev_specs()
1395 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE; in hclge_check_dev_specs()
1396 if (!dev_specs->max_tm_rate) in hclge_check_dev_specs()
1397 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; in hclge_check_dev_specs()
1409 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_query_dev_specs()
1414 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { in hclge_query_dev_specs()
1421 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); in hclge_query_dev_specs()
1437 dev_err(&hdev->pdev->dev, in hclge_get_cap()
1454 dev_info(&hdev->pdev->dev, in hclge_init_kdump_kernel_config()
1458 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; in hclge_init_kdump_kernel_config()
1459 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; in hclge_init_kdump_kernel_config()
1460 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; in hclge_init_kdump_kernel_config()
1473 hdev->num_vmdq_vport = cfg.vmdq_vport_num; in hclge_configure()
1474 hdev->base_tqp_pid = 0; in hclge_configure()
1475 hdev->rss_size_max = cfg.rss_size_max; in hclge_configure()
1476 hdev->rx_buf_len = cfg.rx_buf_len; in hclge_configure()
1477 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); in hclge_configure()
1478 hdev->hw.mac.media_type = cfg.media_type; in hclge_configure()
1479 hdev->hw.mac.phy_addr = cfg.phy_addr; in hclge_configure()
1480 hdev->num_tx_desc = cfg.tqp_desc_num; in hclge_configure()
1481 hdev->num_rx_desc = cfg.tqp_desc_num; in hclge_configure()
1482 hdev->tm_info.num_pg = 1; in hclge_configure()
1483 hdev->tc_max = cfg.tc_num; in hclge_configure()
1484 hdev->tm_info.hw_pfc_map = 0; in hclge_configure()
1485 hdev->wanted_umv_size = cfg.umv_space; in hclge_configure()
1488 hdev->fd_en = true; in hclge_configure()
1489 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_configure()
1492 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); in hclge_configure()
1494 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", in hclge_configure()
1501 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); in hclge_configure()
1503 if ((hdev->tc_max > HNAE3_MAX_TC) || in hclge_configure()
1504 (hdev->tc_max < 1)) { in hclge_configure()
1505 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", in hclge_configure()
1506 hdev->tc_max); in hclge_configure()
1507 hdev->tc_max = 1; in hclge_configure()
1512 hdev->tc_max = 1; in hclge_configure()
1513 hdev->pfc_max = 0; in hclge_configure()
1515 hdev->pfc_max = hdev->tc_max; in hclge_configure()
1518 hdev->tm_info.num_tc = 1; in hclge_configure()
1521 for (i = 0; i < hdev->tm_info.num_tc; i++) in hclge_configure()
1522 hnae3_set_bit(hdev->hw_tc_map, i, 1); in hclge_configure()
1524 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; in hclge_configure()
1528 /* Set the init affinity based on pci func number */ in hclge_configure()
1529 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); in hclge_configure()
1530 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0; in hclge_configure()
1531 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), in hclge_configure()
1532 &hdev->affinity_mask); in hclge_configure()
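Editor's note: the last lines of hclge_configure() spread PFs across the CPUs of the device's NUMA node: the PCI function number modulo the node's CPU count picks which local CPU seeds the affinity mask. The selection, reduced to its arithmetic:

/* i = PCI function number % CPUs on the local node, or 0 if the node has no
 * CPUs; the result indexes cpumask_local_spread() in the driver. */
static int pick_affinity_index(unsigned int pci_func, unsigned int node_cpu_cnt)
{
    return node_cpu_cnt ? (int)(pci_func % node_cpu_cnt) : 0;
}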
1546 req->tso_mss_min = cpu_to_le16(tso_mss_min); in hclge_config_tso()
1547 req->tso_mss_max = cpu_to_le16(tso_mss_max); in hclge_config_tso()
1549 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_tso()
1564 req->gro_en = en ? 1 : 0; in hclge_config_gro()
1566 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_gro()
1568 dev_err(&hdev->pdev->dev, in hclge_config_gro()
1579 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclge_alloc_tqps()
1581 if (!hdev->htqp) in hclge_alloc_tqps()
1582 return -ENOMEM; in hclge_alloc_tqps()
1584 tqp = hdev->htqp; in hclge_alloc_tqps()
1586 for (i = 0; i < hdev->num_tqps; i++) { in hclge_alloc_tqps()
1587 tqp->dev = &hdev->pdev->dev; in hclge_alloc_tqps()
1588 tqp->index = i; in hclge_alloc_tqps()
1590 tqp->q.ae_algo = &ae_algo; in hclge_alloc_tqps()
1591 tqp->q.buf_size = hdev->rx_buf_len; in hclge_alloc_tqps()
1592 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclge_alloc_tqps()
1593 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclge_alloc_tqps()
1594 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + in hclge_alloc_tqps()
1613 req->tqp_id = cpu_to_le16(tqp_pid); in hclge_map_tqps_to_func()
1614 req->tqp_vf = func_id; in hclge_map_tqps_to_func()
1615 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; in hclge_map_tqps_to_func()
1617 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; in hclge_map_tqps_to_func()
1618 req->tqp_vid = cpu_to_le16(tqp_vid); in hclge_map_tqps_to_func()
1620 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_map_tqps_to_func()
1622 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); in hclge_map_tqps_to_func()
1629 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_assign_tqp()
1630 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp()
1633 for (i = 0, alloced = 0; i < hdev->num_tqps && in hclge_assign_tqp()
1635 if (!hdev->htqp[i].alloced) { in hclge_assign_tqp()
1636 hdev->htqp[i].q.handle = &vport->nic; in hclge_assign_tqp()
1637 hdev->htqp[i].q.tqp_index = alloced; in hclge_assign_tqp()
1638 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; in hclge_assign_tqp()
1639 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; in hclge_assign_tqp()
1640 kinfo->tqp[alloced] = &hdev->htqp[i].q; in hclge_assign_tqp()
1641 hdev->htqp[i].alloced = true; in hclge_assign_tqp()
1645 vport->alloc_tqps = alloced; in hclge_assign_tqp()
1646 kinfo->rss_size = min_t(u16, hdev->rss_size_max, in hclge_assign_tqp()
1647 vport->alloc_tqps / hdev->tm_info.num_tc); in hclge_assign_tqp()
1650 kinfo->rss_size = min_t(u16, kinfo->rss_size, in hclge_assign_tqp()
1651 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); in hclge_assign_tqp()
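Editor's note: hclge_assign_tqp() derives the per-vport RSS width from three limits: the hardware maximum, the allocated queues split across TCs, and the NIC vectors left once the misc interrupt is subtracted. A sketch of the clamp (num_tc >= 1 assumed):

#include <stdint.h>

static uint16_t min_u16(uint16_t a, uint16_t b) { return a < b ? a : b; }

/* rss_size = min(rss_size_max, alloc_tqps / num_tc, (num_nic_msi - 1) / num_tc) */
static uint16_t compute_rss_size(uint16_t rss_size_max, uint16_t alloc_tqps,
                                 uint16_t num_tc, uint16_t num_nic_msi)
{
    uint16_t rss = min_u16(rss_size_max, alloc_tqps / num_tc);

    /* one vector is reserved for misc; the rest must cover every TC's queues */
    return min_u16(rss, (num_nic_msi - 1) / num_tc);
}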
1660 struct hnae3_handle *nic = &vport->nic; in hclge_knic_setup()
1661 struct hnae3_knic_private_info *kinfo = &nic->kinfo; in hclge_knic_setup()
1662 struct hclge_dev *hdev = vport->back; in hclge_knic_setup()
1665 kinfo->num_tx_desc = num_tx_desc; in hclge_knic_setup()
1666 kinfo->num_rx_desc = num_rx_desc; in hclge_knic_setup()
1668 kinfo->rx_buf_len = hdev->rx_buf_len; in hclge_knic_setup()
1670 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, in hclge_knic_setup()
1672 if (!kinfo->tqp) in hclge_knic_setup()
1673 return -ENOMEM; in hclge_knic_setup()
1677 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); in hclge_knic_setup()
1685 struct hnae3_handle *nic = &vport->nic; in hclge_map_tqp_to_vport()
1689 kinfo = &nic->kinfo; in hclge_map_tqp_to_vport()
1690 for (i = 0; i < vport->alloc_tqps; i++) { in hclge_map_tqp_to_vport()
1692 container_of(kinfo->tqp[i], struct hclge_tqp, q); in hclge_map_tqp_to_vport()
1696 is_pf = !(vport->vport_id); in hclge_map_tqp_to_vport()
1697 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, in hclge_map_tqp_to_vport()
1708 struct hclge_vport *vport = hdev->vport; in hclge_map_tqp()
1711 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; in hclge_map_tqp()
1727 struct hnae3_handle *nic = &vport->nic; in hclge_vport_setup()
1728 struct hclge_dev *hdev = vport->back; in hclge_vport_setup()
1731 nic->pdev = hdev->pdev; in hclge_vport_setup()
1732 nic->ae_algo = &ae_algo; in hclge_vport_setup()
1733 nic->numa_node_mask = hdev->numa_node_mask; in hclge_vport_setup()
1736 hdev->num_tx_desc, hdev->num_rx_desc); in hclge_vport_setup()
1738 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); in hclge_vport_setup()
1745 struct pci_dev *pdev = hdev->pdev; in hclge_alloc_vport()
1753 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; in hclge_alloc_vport()
1755 if (hdev->num_tqps < num_vport) { in hclge_alloc_vport()
1756 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", in hclge_alloc_vport()
1757 hdev->num_tqps, num_vport); in hclge_alloc_vport()
1758 return -EINVAL; in hclge_alloc_vport()
1762 tqp_per_vport = hdev->num_tqps / num_vport; in hclge_alloc_vport()
1763 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; in hclge_alloc_vport()
1765 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), in hclge_alloc_vport()
1768 return -ENOMEM; in hclge_alloc_vport()
1770 hdev->vport = vport; in hclge_alloc_vport()
1771 hdev->num_alloc_vport = num_vport; in hclge_alloc_vport()
1774 hdev->num_alloc_vfs = hdev->num_req_vfs; in hclge_alloc_vport()
1777 vport->back = hdev; in hclge_alloc_vport()
1778 vport->vport_id = i; in hclge_alloc_vport()
1779 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; in hclge_alloc_vport()
1780 vport->mps = HCLGE_MAC_DEFAULT_FRAME; in hclge_alloc_vport()
1781 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; in hclge_alloc_vport()
1782 vport->rxvlan_cfg.rx_vlan_offload_en = true; in hclge_alloc_vport()
1783 INIT_LIST_HEAD(&vport->vlan_list); in hclge_alloc_vport()
1784 INIT_LIST_HEAD(&vport->uc_mac_list); in hclge_alloc_vport()
1785 INIT_LIST_HEAD(&vport->mc_mac_list); in hclge_alloc_vport()
1786 spin_lock_init(&vport->mac_list_lock); in hclge_alloc_vport()
1793 dev_err(&pdev->dev, in hclge_alloc_vport()
1820 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; in hclge_cmd_alloc_tx_buff()
1822 req->tx_pkt_buff[i] = in hclge_cmd_alloc_tx_buff()
1827 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_alloc_tx_buff()
1829 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", in hclge_cmd_alloc_tx_buff()
1841 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); in hclge_tx_buffer_alloc()
1852 if (hdev->hw_tc_map & BIT(i)) in hclge_get_tc_num()
1866 priv = &buf_alloc->priv_buf[i]; in hclge_get_pfc_priv_num()
1867 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_pfc_priv_num()
1868 priv->enable) in hclge_get_pfc_priv_num()
1884 priv = &buf_alloc->priv_buf[i]; in hclge_get_no_pfc_priv_num()
1885 if (hdev->hw_tc_map & BIT(i) && in hclge_get_no_pfc_priv_num()
1886 !(hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_no_pfc_priv_num()
1887 priv->enable) in hclge_get_no_pfc_priv_num()
1901 priv = &buf_alloc->priv_buf[i]; in hclge_get_rx_priv_buff_alloced()
1902 if (priv->enable) in hclge_get_rx_priv_buff_alloced()
1903 rx_priv += priv->buf_size; in hclge_get_rx_priv_buff_alloced()
1913 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; in hclge_get_tx_buff_alloced()
1928 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
1932 hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1935 + hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1945 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
1946 buf_alloc->s_buf.buf_size = shared_buf; in hclge_is_rx_buf_ok()
1948 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1949 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high in hclge_is_rx_buf_ok()
1950 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, in hclge_is_rx_buf_ok()
1953 buf_alloc->s_buf.self.high = aligned_mps + in hclge_is_rx_buf_ok()
1955 buf_alloc->s_buf.self.low = aligned_mps; in hclge_is_rx_buf_ok()
1959 hi_thrd = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1970 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; in hclge_is_rx_buf_ok()
1977 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; in hclge_is_rx_buf_ok()
1978 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; in hclge_is_rx_buf_ok()
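Editor's note: in hclge_is_rx_buf_ok(), whatever RX budget the per-TC private buffers do not claim becomes the shared pool, rounded down to the buffer unit; in the branch shown, its high threshold sits dv_buf_size below the top and the low threshold a rounded half-MPS step below that. A simplified sketch of that branch (the unit size and the divide-by-two are assumptions standing in for HCLGE_BUF_SIZE_UNIT and HCLGE_BUF_DIV_BY):

#include <stdint.h>

#define BUF_SIZE_UNIT 256u   /* assumed value of HCLGE_BUF_SIZE_UNIT */

static uint32_t round_down_u(uint32_t v, uint32_t a) { return v - v % a; }
static uint32_t round_up_u(uint32_t v, uint32_t a)   { return round_down_u(v + a - 1, a); }

/* Size the shared pool and place its high/low waterlines. */
static uint32_t shared_buf_size(uint32_t rx_all, uint32_t rx_priv,
                                uint32_t dv_buf, uint32_t aligned_mps,
                                uint32_t *hi, uint32_t *lo)
{
    uint32_t shared = round_down_u(rx_all - rx_priv, BUF_SIZE_UNIT);

    *hi = shared - dv_buf;                                   /* back-pressure point */
    *lo = *hi - round_up_u(aligned_mps / 2, BUF_SIZE_UNIT);  /* resume point */
    return shared;
}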
1989 total_size = hdev->pkt_buf_size; in hclge_tx_buffer_calc()
1993 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_tx_buffer_calc()
1995 if (hdev->hw_tc_map & BIT(i)) { in hclge_tx_buffer_calc()
1996 if (total_size < hdev->tx_buf_size) in hclge_tx_buffer_calc()
1997 return -ENOMEM; in hclge_tx_buffer_calc()
1999 priv->tx_buf_size = hdev->tx_buf_size; in hclge_tx_buffer_calc()
2001 priv->tx_buf_size = 0; in hclge_tx_buffer_calc()
2004 total_size -= priv->tx_buf_size; in hclge_tx_buffer_calc()
2013 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buf_calc_all()
2014 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_rx_buf_calc_all()
2018 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_rx_buf_calc_all()
2020 priv->enable = 0; in hclge_rx_buf_calc_all()
2021 priv->wl.low = 0; in hclge_rx_buf_calc_all()
2022 priv->wl.high = 0; in hclge_rx_buf_calc_all()
2023 priv->buf_size = 0; in hclge_rx_buf_calc_all()
2025 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rx_buf_calc_all()
2028 priv->enable = 1; in hclge_rx_buf_calc_all()
2030 if (hdev->tm_info.hw_pfc_map & BIT(i)) { in hclge_rx_buf_calc_all()
2031 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; in hclge_rx_buf_calc_all()
2032 priv->wl.high = roundup(priv->wl.low + aligned_mps, in hclge_rx_buf_calc_all()
2035 priv->wl.low = 0; in hclge_rx_buf_calc_all()
2036 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : in hclge_rx_buf_calc_all()
2040 priv->buf_size = priv->wl.high + hdev->dv_buf_size; in hclge_rx_buf_calc_all()
2049 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2054 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { in hclge_drop_nopfc_buf_till_fit()
2055 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_drop_nopfc_buf_till_fit()
2058 if (hdev->hw_tc_map & mask && in hclge_drop_nopfc_buf_till_fit()
2059 !(hdev->tm_info.hw_pfc_map & mask)) { in hclge_drop_nopfc_buf_till_fit()
2061 priv->wl.low = 0; in hclge_drop_nopfc_buf_till_fit()
2062 priv->wl.high = 0; in hclge_drop_nopfc_buf_till_fit()
2063 priv->buf_size = 0; in hclge_drop_nopfc_buf_till_fit()
2064 priv->enable = 0; in hclge_drop_nopfc_buf_till_fit()
2065 no_pfc_priv_num--; in hclge_drop_nopfc_buf_till_fit()
2079 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_pfc_buf_till_fit()
2084 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { in hclge_drop_pfc_buf_till_fit()
2085 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_drop_pfc_buf_till_fit()
2088 if (hdev->hw_tc_map & mask && in hclge_drop_pfc_buf_till_fit()
2089 hdev->tm_info.hw_pfc_map & mask) { in hclge_drop_pfc_buf_till_fit()
2091 priv->wl.low = 0; in hclge_drop_pfc_buf_till_fit()
2092 priv->enable = 0; in hclge_drop_pfc_buf_till_fit()
2093 priv->wl.high = 0; in hclge_drop_pfc_buf_till_fit()
2094 priv->buf_size = 0; in hclge_drop_pfc_buf_till_fit()
2095 pfc_priv_num--; in hclge_drop_pfc_buf_till_fit()
2113 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_only_alloc_priv_buff()
2115 u32 half_mps = hdev->mps >> 1; in hclge_only_alloc_priv_buff()
2125 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + in hclge_only_alloc_priv_buff()
2134 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_only_alloc_priv_buff()
2136 priv->enable = 0; in hclge_only_alloc_priv_buff()
2137 priv->wl.low = 0; in hclge_only_alloc_priv_buff()
2138 priv->wl.high = 0; in hclge_only_alloc_priv_buff()
2139 priv->buf_size = 0; in hclge_only_alloc_priv_buff()
2141 if (!(hdev->hw_tc_map & BIT(i))) in hclge_only_alloc_priv_buff()
2144 priv->enable = 1; in hclge_only_alloc_priv_buff()
2145 priv->buf_size = rx_priv; in hclge_only_alloc_priv_buff()
2146 priv->wl.high = rx_priv - hdev->dv_buf_size; in hclge_only_alloc_priv_buff()
2147 priv->wl.low = priv->wl.high - PRIV_WL_GAP; in hclge_only_alloc_priv_buff()
2150 buf_alloc->s_buf.buf_size = 0; in hclge_only_alloc_priv_buff()
2165 u32 rx_all = hdev->pkt_buf_size; in hclge_rx_buffer_calc()
2167 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buffer_calc()
2169 return -ENOMEM; in hclge_rx_buffer_calc()
2190 return -ENOMEM; in hclge_rx_buffer_calc()
2206 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; in hclge_rx_priv_buf_alloc()
2208 req->buf_num[i] = in hclge_rx_priv_buf_alloc()
2209 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_buf_alloc()
2210 req->buf_num[i] |= in hclge_rx_priv_buf_alloc()
2214 req->shared_buf = in hclge_rx_priv_buf_alloc()
2215 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | in hclge_rx_priv_buf_alloc()
2218 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_rx_priv_buf_alloc()
2220 dev_err(&hdev->pdev->dev, in hclge_rx_priv_buf_alloc()
2249 priv = &buf_alloc->priv_buf[idx]; in hclge_rx_priv_wl_config()
2250 req->tc_wl[j].high = in hclge_rx_priv_wl_config()
2251 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_wl_config()
2252 req->tc_wl[j].high |= in hclge_rx_priv_wl_config()
2254 req->tc_wl[j].low = in hclge_rx_priv_wl_config()
2255 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); in hclge_rx_priv_wl_config()
2256 req->tc_wl[j].low |= in hclge_rx_priv_wl_config()
2262 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_rx_priv_wl_config()
2264 dev_err(&hdev->pdev->dev, in hclge_rx_priv_wl_config()
2273 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; in hclge_common_thrd_config()
2292 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; in hclge_common_thrd_config()
2294 req->com_thrd[j].high = in hclge_common_thrd_config()
2295 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); in hclge_common_thrd_config()
2296 req->com_thrd[j].high |= in hclge_common_thrd_config()
2298 req->com_thrd[j].low = in hclge_common_thrd_config()
2299 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); in hclge_common_thrd_config()
2300 req->com_thrd[j].low |= in hclge_common_thrd_config()
2306 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_common_thrd_config()
2308 dev_err(&hdev->pdev->dev, in hclge_common_thrd_config()
2316 struct hclge_shared_buf *buf = &buf_alloc->s_buf; in hclge_common_wl_config()
2324 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); in hclge_common_wl_config()
2325 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); in hclge_common_wl_config()
2327 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); in hclge_common_wl_config()
2328 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); in hclge_common_wl_config()
2330 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_common_wl_config()
2332 dev_err(&hdev->pdev->dev, in hclge_common_wl_config()
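Editor's note: all the buffer waterlines above are programmed in hardware units: the byte value is shifted right by HCLGE_BUF_UNIT_S before it is written, and the priv-enable bit is OR'ed into the same 16-bit field. A sketch of that encoding (the shift of 7, i.e. 128-byte units, and the bit position 15 are my assumptions for HCLGE_BUF_UNIT_S and HCLGE_RX_PRIV_EN_B; the cpu_to_le16() conversion is omitted):

#include <stdint.h>

#define BUF_UNIT_S   7u    /* assumed: hardware counts in 128-byte units */
#define RX_PRIV_EN_B 15u   /* assumed bit position of the enable flag */

static uint16_t encode_waterline(uint32_t bytes)
{
    return (uint16_t)((bytes >> BUF_UNIT_S) | (1u << RX_PRIV_EN_B));
}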
2345 return -ENOMEM; in hclge_buffer_alloc()
2349 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2356 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2363 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2371 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", in hclge_buffer_alloc()
2379 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2387 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2396 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2406 struct hnae3_handle *roce = &vport->roce; in hclge_init_roce_base_info()
2407 struct hnae3_handle *nic = &vport->nic; in hclge_init_roce_base_info()
2409 roce->rinfo.num_vectors = vport->back->num_roce_msi; in hclge_init_roce_base_info()
2411 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || in hclge_init_roce_base_info()
2412 vport->back->num_msi_left == 0) in hclge_init_roce_base_info()
2413 return -EINVAL; in hclge_init_roce_base_info()
2415 roce->rinfo.base_vector = vport->back->roce_base_vector; in hclge_init_roce_base_info()
2417 roce->rinfo.netdev = nic->kinfo.netdev; in hclge_init_roce_base_info()
2418 roce->rinfo.roce_io_base = vport->back->hw.io_base; in hclge_init_roce_base_info()
2420 roce->pdev = nic->pdev; in hclge_init_roce_base_info()
2421 roce->ae_algo = nic->ae_algo; in hclge_init_roce_base_info()
2422 roce->numa_node_mask = nic->numa_node_mask; in hclge_init_roce_base_info()
2429 struct pci_dev *pdev = hdev->pdev; in hclge_init_msi()
2434 hdev->num_msi, in hclge_init_msi()
2437 dev_err(&pdev->dev, in hclge_init_msi()
2438 "failed(%d) to allocate MSI/MSI-X vectors\n", in hclge_init_msi()
2442 if (vectors < hdev->num_msi) in hclge_init_msi()
2443 dev_warn(&hdev->pdev->dev, in hclge_init_msi()
2444 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", in hclge_init_msi()
2445 hdev->num_msi, vectors); in hclge_init_msi()
2447 hdev->num_msi = vectors; in hclge_init_msi()
2448 hdev->num_msi_left = vectors; in hclge_init_msi()
2450 hdev->base_msi_vector = pdev->irq; in hclge_init_msi()
2451 hdev->roce_base_vector = hdev->base_msi_vector + in hclge_init_msi()
2452 hdev->roce_base_msix_offset; in hclge_init_msi()
2454 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2456 if (!hdev->vector_status) { in hclge_init_msi()
2458 return -ENOMEM; in hclge_init_msi()
2461 for (i = 0; i < hdev->num_msi; i++) in hclge_init_msi()
2462 hdev->vector_status[i] = HCLGE_INVALID_VPORT; in hclge_init_msi()
2464 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2466 if (!hdev->vector_irq) { in hclge_init_msi()
2468 return -ENOMEM; in hclge_init_msi()
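Editor's note: hclge_init_msi() tolerates a partial MSI/MSI-X grant: it asks for num_msi vectors, warns but continues if the PCI core returns fewer, and then marks every slot free with HCLGE_INVALID_VPORT. The acceptance logic, condensed (the -1 sentinel stands in for HCLGE_INVALID_VPORT):

/* Keep whatever vector count was granted; fail only on a hard error. */
static int accept_msi_grant(int granted, int requested,
                            int *vector_status, int n_status)
{
    if (granted < 0)
        return granted;            /* allocation failed outright */
    /* granted < requested is only worth a warning: run degraded */
    for (int i = 0; i < granted && i < n_status; i++)
        vector_status[i] = -1;     /* slot free, no vport bound yet */
    return granted;
}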
2494 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); in hclge_cfg_mac_speed_dup_hw()
2498 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2502 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2506 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2510 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2514 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2518 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2522 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2526 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2530 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, in hclge_cfg_mac_speed_dup_hw()
2534 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); in hclge_cfg_mac_speed_dup_hw()
2535 return -EINVAL; in hclge_cfg_mac_speed_dup_hw()
2538 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, in hclge_cfg_mac_speed_dup_hw()
2541 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_speed_dup_hw()
2543 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_speed_dup_hw()
2553 struct hclge_mac *mac = &hdev->hw.mac; in hclge_cfg_mac_speed_dup()
2557 if (!mac->support_autoneg && mac->speed == speed && in hclge_cfg_mac_speed_dup()
2558 mac->duplex == duplex) in hclge_cfg_mac_speed_dup()
2565 hdev->hw.mac.speed = speed; in hclge_cfg_mac_speed_dup()
2566 hdev->hw.mac.duplex = duplex; in hclge_cfg_mac_speed_dup()
2575 struct hclge_dev *hdev = vport->back; in hclge_cfg_mac_speed_dup_h()
2592 req->cfg_an_cmd_flag = cpu_to_le32(flag); in hclge_set_autoneg_en()
2594 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_autoneg_en()
2596 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", in hclge_set_autoneg_en()
2605 struct hclge_dev *hdev = vport->back; in hclge_set_autoneg()
2607 if (!hdev->hw.mac.support_autoneg) { in hclge_set_autoneg()
2609 dev_err(&hdev->pdev->dev, in hclge_set_autoneg()
2611 return -EOPNOTSUPP; in hclge_set_autoneg()
2623 struct hclge_dev *hdev = vport->back; in hclge_get_autoneg()
2624 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_autoneg()
2627 return phydev->autoneg; in hclge_get_autoneg()
2629 return hdev->hw.mac.autoneg; in hclge_get_autoneg()
2635 struct hclge_dev *hdev = vport->back; in hclge_restart_autoneg()
2638 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); in hclge_restart_autoneg()
2649 struct hclge_dev *hdev = vport->back; in hclge_halt_autoneg()
2651 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) in hclge_halt_autoneg()
2667 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); in hclge_set_fec_hw()
2669 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2672 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, in hclge_set_fec_hw()
2675 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fec_hw()
2677 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); in hclge_set_fec_hw()
2685 struct hclge_dev *hdev = vport->back; in hclge_set_fec()
2686 struct hclge_mac *mac = &hdev->hw.mac; in hclge_set_fec()
2689 if (fec_mode && !(mac->fec_ability & fec_mode)) { in hclge_set_fec()
2690 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); in hclge_set_fec()
2691 return -EINVAL; in hclge_set_fec()
2698 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); in hclge_set_fec()
2706 struct hclge_dev *hdev = vport->back; in hclge_get_fec()
2707 struct hclge_mac *mac = &hdev->hw.mac; in hclge_get_fec()
2710 *fec_ability = mac->fec_ability; in hclge_get_fec()
2712 *fec_mode = mac->fec_mode; in hclge_get_fec()
2717 struct hclge_mac *mac = &hdev->hw.mac; in hclge_mac_init()
2720 hdev->support_sfp_query = true; in hclge_mac_init()
2721 hdev->hw.mac.duplex = HCLGE_MAC_FULL; in hclge_mac_init()
2722 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, in hclge_mac_init()
2723 hdev->hw.mac.duplex); in hclge_mac_init()
2727 if (hdev->hw.mac.support_autoneg) { in hclge_mac_init()
2728 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); in hclge_mac_init()
2733 mac->link = 0; in hclge_mac_init()
2735 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { in hclge_mac_init()
2736 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); in hclge_mac_init()
2741 ret = hclge_set_mac_mtu(hdev, hdev->mps); in hclge_mac_init()
2743 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); in hclge_mac_init()
2753 dev_err(&hdev->pdev->dev, in hclge_mac_init()
2761 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_mbx_task_schedule()
2762 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) in hclge_mbx_task_schedule()
2763 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), in hclge_mbx_task_schedule()
2764 hclge_wq, &hdev->service_task, 0); in hclge_mbx_task_schedule()
2769 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_reset_task_schedule()
2770 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_task_schedule()
2771 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), in hclge_reset_task_schedule()
2772 hclge_wq, &hdev->service_task, 0); in hclge_reset_task_schedule()
2777 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_task_schedule()
2778 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_task_schedule()
2779 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), in hclge_task_schedule()
2780 hclge_wq, &hdev->service_task, in hclge_task_schedule()
2791 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_mac_link_status()
2793 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", in hclge_get_mac_link_status()
2799 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? in hclge_get_mac_link_status()
2807 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mac_phy_link()
2811 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) in hclge_get_mac_phy_link()
2814 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) in hclge_get_mac_phy_link()
2822 struct hnae3_client *rclient = hdev->roce_client; in hclge_update_link_status()
2823 struct hnae3_client *client = hdev->nic_client; in hclge_update_link_status()
2833 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) in hclge_update_link_status()
2838 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
2842 if (state != hdev->hw.mac.link) { in hclge_update_link_status()
2843 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_update_link_status()
2844 handle = &hdev->vport[i].nic; in hclge_update_link_status()
2845 client->ops->link_status_change(handle, state); in hclge_update_link_status()
2847 rhandle = &hdev->vport[i].roce; in hclge_update_link_status()
2848 if (rclient && rclient->ops->link_status_change) in hclge_update_link_status()
2849 rclient->ops->link_status_change(rhandle, in hclge_update_link_status()
2852 hdev->hw.mac.link = state; in hclge_update_link_status()
2855 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
2866 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && in hclge_update_port_capability()
2867 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) in hclge_update_port_capability()
2868 mac->module_type = HNAE3_MODULE_TYPE_KR; in hclge_update_port_capability()
2869 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) in hclge_update_port_capability()
2870 mac->module_type = HNAE3_MODULE_TYPE_TP; in hclge_update_port_capability()
2872 if (mac->support_autoneg) { in hclge_update_port_capability()
2873 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); in hclge_update_port_capability()
2874 linkmode_copy(mac->advertising, mac->supported); in hclge_update_port_capability()
2877 mac->supported); in hclge_update_port_capability()
2878 linkmode_zero(mac->advertising); in hclge_update_port_capability()
2890 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_speed()
2891 if (ret == -EOPNOTSUPP) { in hclge_get_sfp_speed()
2892 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_speed()
2896 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); in hclge_get_sfp_speed()
2900 *speed = le32_to_cpu(resp->speed); in hclge_get_sfp_speed()
2914 resp->query_type = QUERY_ACTIVE_SPEED; in hclge_get_sfp_info()
2916 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_info()
2917 if (ret == -EOPNOTSUPP) { in hclge_get_sfp_info()
2918 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_info()
2922 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); in hclge_get_sfp_info()
2927 * set to mac->speed. in hclge_get_sfp_info()
2929 if (!le32_to_cpu(resp->speed)) in hclge_get_sfp_info()
2932 mac->speed = le32_to_cpu(resp->speed); in hclge_get_sfp_info()
2933 /* if resp->speed_ability is 0, it means it's an old version in hclge_get_sfp_info()
2936 if (resp->speed_ability) { in hclge_get_sfp_info()
2937 mac->module_type = le32_to_cpu(resp->module_type); in hclge_get_sfp_info()
2938 mac->speed_ability = le32_to_cpu(resp->speed_ability); in hclge_get_sfp_info()
2939 mac->autoneg = resp->autoneg; in hclge_get_sfp_info()
2940 mac->support_autoneg = resp->autoneg_ability; in hclge_get_sfp_info()
2941 mac->speed_type = QUERY_ACTIVE_SPEED; in hclge_get_sfp_info()
2942 if (!resp->active_fec) in hclge_get_sfp_info()
2943 mac->fec_mode = 0; in hclge_get_sfp_info()
2945 mac->fec_mode = BIT(resp->active_fec); in hclge_get_sfp_info()
2947 mac->speed_type = QUERY_SFP_SPEED; in hclge_get_sfp_info()
2955 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_port_info()
2960 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) in hclge_update_port_info()
2964 if (!hdev->support_sfp_query) in hclge_update_port_info()
2967 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) in hclge_update_port_info()
2972 if (ret == -EOPNOTSUPP) { in hclge_update_port_info()
2973 hdev->support_sfp_query = false; in hclge_update_port_info()
2979 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
2980 if (mac->speed_type == QUERY_ACTIVE_SPEED) { in hclge_update_port_info()
2984 return hclge_cfg_mac_speed_dup(hdev, mac->speed, in hclge_update_port_info()
2998 struct hclge_dev *hdev = vport->back; in hclge_get_status()
3002 return hdev->hw.mac.link; in hclge_get_status()
3007 if (!pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3008 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3013 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3014 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3016 vf, pci_num_vf(hdev->pdev)); in hclge_get_vf_vport()
3022 return &hdev->vport[vf]; in hclge_get_vf_vport()
3029 struct hclge_dev *hdev = vport->back; in hclge_get_vf_config()
3033 return -EINVAL; in hclge_get_vf_config()
3035 ivf->vf = vf; in hclge_get_vf_config()
3036 ivf->linkstate = vport->vf_info.link_state; in hclge_get_vf_config()
3037 ivf->spoofchk = vport->vf_info.spoofchk; in hclge_get_vf_config()
3038 ivf->trusted = vport->vf_info.trusted; in hclge_get_vf_config()
3039 ivf->min_tx_rate = 0; in hclge_get_vf_config()
3040 ivf->max_tx_rate = vport->vf_info.max_tx_rate; in hclge_get_vf_config()
3041 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; in hclge_get_vf_config()
3042 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); in hclge_get_vf_config()
3043 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; in hclge_get_vf_config()
3044 ether_addr_copy(ivf->mac, vport->vf_info.mac); in hclge_get_vf_config()
3053 struct hclge_dev *hdev = vport->back; in hclge_set_vf_link_state()
3057 return -EINVAL; in hclge_set_vf_link_state()
3059 vport->vf_info.link_state = link_state; in hclge_set_vf_link_state()
3069 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); in hclge_check_event_cause()
3070 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_check_event_cause()
3081 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); in hclge_check_event_cause()
3082 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3083 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_check_event_cause()
3085 hdev->rst_stats.imp_rst_cnt++; in hclge_check_event_cause()
3090 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); in hclge_check_event_cause()
3091 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_check_event_cause()
3092 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3094 hdev->rst_stats.global_rst_cnt++; in hclge_check_event_cause()
3112 dev_info(&hdev->pdev->dev, in hclge_check_event_cause()
3125 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); in hclge_clear_event_cause()
3128 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); in hclge_clear_event_cause()
3146 writel(enable ? 1 : 0, vector->addr); in hclge_enable_vector()
3155 hclge_enable_vector(&hdev->misc_vector, false); in hclge_misc_irq_handle()
3171 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); in hclge_misc_irq_handle()
3189 dev_warn(&hdev->pdev->dev, in hclge_misc_irq_handle()
3203 hclge_enable_vector(&hdev->misc_vector, true); in hclge_misc_irq_handle()
3211 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { in hclge_free_vector()
3212 dev_warn(&hdev->pdev->dev, in hclge_free_vector()
3217 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; in hclge_free_vector()
3218 hdev->num_msi_left += 1; in hclge_free_vector()
3219 hdev->num_msi_used -= 1; in hclge_free_vector()
3224 struct hclge_misc_vector *vector = &hdev->misc_vector; in hclge_get_misc_vector()
3226 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); in hclge_get_misc_vector()
3228 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; in hclge_get_misc_vector()
3229 hdev->vector_status[0] = 0; in hclge_get_misc_vector()
3231 hdev->num_msi_left -= 1; in hclge_get_misc_vector()
3232 hdev->num_msi_used += 1; in hclge_get_misc_vector()
3241 cpumask_copy(&hdev->affinity_mask, mask); in hclge_irq_affinity_notify()
3250 irq_set_affinity_hint(hdev->misc_vector.vector_irq, in hclge_misc_affinity_setup()
3251 &hdev->affinity_mask); in hclge_misc_affinity_setup()
3253 hdev->affinity_notify.notify = hclge_irq_affinity_notify; in hclge_misc_affinity_setup()
3254 hdev->affinity_notify.release = hclge_irq_affinity_release; in hclge_misc_affinity_setup()
3255 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, in hclge_misc_affinity_setup()
3256 &hdev->affinity_notify); in hclge_misc_affinity_setup()
3261 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); in hclge_misc_affinity_teardown()
3262 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); in hclge_misc_affinity_teardown()
3272 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", in hclge_misc_irq_init()
3273 HCLGE_NAME, pci_name(hdev->pdev)); in hclge_misc_irq_init()
3274 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, in hclge_misc_irq_init()
3275 0, hdev->misc_vector.name, hdev); in hclge_misc_irq_init()
3278 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", in hclge_misc_irq_init()
3279 hdev->misc_vector.vector_irq); in hclge_misc_irq_init()
3287 free_irq(hdev->misc_vector.vector_irq, hdev); in hclge_misc_irq_uninit()
3294 struct hnae3_client *client = hdev->nic_client; in hclge_notify_client()
3297 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) in hclge_notify_client()
3300 if (!client->ops->reset_notify) in hclge_notify_client()
3301 return -EOPNOTSUPP; in hclge_notify_client()
3303 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_notify_client()
3304 struct hnae3_handle *handle = &hdev->vport[i].nic; in hclge_notify_client()
3307 ret = client->ops->reset_notify(handle, type); in hclge_notify_client()
3309 dev_err(&hdev->pdev->dev, in hclge_notify_client()
3321 struct hnae3_client *client = hdev->roce_client; in hclge_notify_roce_client()
3325 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclge_notify_roce_client()
3328 if (!client->ops->reset_notify) in hclge_notify_roce_client()
3329 return -EOPNOTSUPP; in hclge_notify_roce_client()
3331 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_notify_roce_client()
3332 struct hnae3_handle *handle = &hdev->vport[i].roce; in hclge_notify_roce_client()
3334 ret = client->ops->reset_notify(handle, type); in hclge_notify_roce_client()
3336 dev_err(&hdev->pdev->dev, in hclge_notify_roce_client()
3354 switch (hdev->reset_type) { in hclge_reset_wait()
3368 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3370 hdev->reset_type); in hclge_reset_wait()
3371 return -EINVAL; in hclge_reset_wait()
3374 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3377 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3382 dev_warn(&hdev->pdev->dev, in hclge_reset_wait()
3383 "Wait for reset timeout: %d\n", hdev->reset_type); in hclge_reset_wait()
3384 return -EBUSY; in hclge_reset_wait()
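Editor's note: hclge_reset_wait() is a poll-with-timeout: re-read the reset status register until the relevant bit clears, sleeping a fixed interval between reads, and give up with -EBUSY after a bounded number of tries. The skeleton, with the register access and delay abstracted as callbacks:

#include <stdint.h>

static int wait_bit_clear(uint32_t (*read_reg)(void), uint32_t bit,
                          int max_tries, void (*sleep_fixed)(void))
{
    for (int i = 0; i < max_tries; i++) {
        if (!(read_reg() & bit))
            return 0;      /* reset complete */
        sleep_fixed();     /* fixed per-iteration delay, as in the driver */
    }
    return -1;             /* ~ -EBUSY: wait for reset timed out */
}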
3397 req->dest_vfid = func_id; in hclge_set_vf_rst()
3400 req->vf_rst = 0x1; in hclge_set_vf_rst()
3402 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vf_rst()
3409 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { in hclge_set_all_vf_rst()
3410 struct hclge_vport *vport = &hdev->vport[i]; in hclge_set_all_vf_rst()
3414 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); in hclge_set_all_vf_rst()
3416 dev_err(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3418 vport->vport_id, ret); in hclge_set_all_vf_rst()
3422 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_set_all_vf_rst()
3431 dev_warn(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3433 vport->vport_id, ret); in hclge_set_all_vf_rst()
3441 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || in hclge_mailbox_service_task()
3442 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) || in hclge_mailbox_service_task()
3443 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) in hclge_mailbox_service_task()
3448 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_mailbox_service_task()
3465 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_sync_vf()
3469 if (ret == -EOPNOTSUPP) { in hclge_func_reset_sync_vf()
3473 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", in hclge_func_reset_sync_vf()
3476 } else if (req->all_vf_ready) { in hclge_func_reset_sync_vf()
3483 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); in hclge_func_reset_sync_vf()
3489 struct hnae3_client *client = hdev->nic_client; in hclge_report_hw_error()
3492 if (!client || !client->ops->process_hw_error || in hclge_report_hw_error()
3493 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) in hclge_report_hw_error()
3496 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) in hclge_report_hw_error()
3497 client->ops->process_hw_error(&hdev->vport[i].nic, type); in hclge_report_hw_error()
3504 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_handle_imp_error()
3508 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
3514 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
3525 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); in hclge_func_reset_cmd()
3526 req->fun_reset_vfid = func_id; in hclge_func_reset_cmd()
3528 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_cmd()
3530 dev_err(&hdev->pdev->dev, in hclge_func_reset_cmd()
3538 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_do_reset()
3539 struct pci_dev *pdev = hdev->pdev; in hclge_do_reset()
3543 dev_info(&pdev->dev, "hardware reset not finish\n"); in hclge_do_reset()
3544 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", in hclge_do_reset()
3545 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), in hclge_do_reset()
3546 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); in hclge_do_reset()
3550 switch (hdev->reset_type) { in hclge_do_reset()
3552 dev_info(&pdev->dev, "global reset requested\n"); in hclge_do_reset()
3553 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); in hclge_do_reset()
3555 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); in hclge_do_reset()
3558 dev_info(&pdev->dev, "PF reset requested\n"); in hclge_do_reset()
3560 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); in hclge_do_reset()
3564 dev_warn(&pdev->dev, in hclge_do_reset()
3565 "unsupported reset type: %d\n", hdev->reset_type); in hclge_do_reset()
3574 struct hclge_dev *hdev = ae_dev->priv; in hclge_get_reset_level()
3578 u32 msix_sts_reg = hclge_read_dev(&hdev->hw, in hclge_get_reset_level()
3584 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n", in hclge_get_reset_level()
3595 hclge_enable_vector(&hdev->misc_vector, true); in hclge_get_reset_level()
3616 if (hdev->reset_type != HNAE3_NONE_RESET && in hclge_get_reset_level()
3617 rst_level < hdev->reset_type) in hclge_get_reset_level()
3627 switch (hdev->reset_type) { in hclge_clear_reset_cause()
3644 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_clear_reset_cause()
3645 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, in hclge_clear_reset_cause()
3648 hclge_enable_vector(&hdev->misc_vector, true); in hclge_clear_reset_cause()
3655 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG); in hclge_reset_handshake()
3661 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); in hclge_reset_handshake()
3682 switch (hdev->reset_type) { in hclge_reset_prepare_wait()
3690 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_wait()
3700 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_reset_prepare_wait()
3701 hdev->rst_stats.pf_rst_cnt++; in hclge_reset_prepare_wait()
3710 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_reset_prepare_wait()
3711 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, in hclge_reset_prepare_wait()
3721 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); in hclge_reset_prepare_wait()
3730 if (hdev->reset_pending) { in hclge_reset_err_handle()
3731 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", in hclge_reset_err_handle()
3732 hdev->reset_pending); in hclge_reset_err_handle()
3734 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & in hclge_reset_err_handle()
3736 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
3740 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { in hclge_reset_err_handle()
3741 hdev->rst_stats.reset_fail_cnt++; in hclge_reset_err_handle()
3742 set_bit(hdev->reset_type, &hdev->reset_pending); in hclge_reset_err_handle()
3743 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
3744 "re-schedule reset task(%u)\n", in hclge_reset_err_handle()
3745 hdev->rst_stats.reset_fail_cnt); in hclge_reset_err_handle()
3754 dev_err(&hdev->pdev->dev, "Reset fail!\n"); in hclge_reset_err_handle()
3758 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_err_handle()
3771 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; in hclge_set_rst_done()
3773 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rst_done()
3778 if (ret == -EOPNOTSUPP) { in hclge_set_rst_done()
3779 dev_warn(&hdev->pdev->dev, in hclge_set_rst_done()
3784 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", in hclge_set_rst_done()
3795 switch (hdev->reset_type) { in hclge_reset_prepare_up()
3808 /* clear up the handshake status after re-initialize done */ in hclge_reset_prepare_up()
3822 ret = hclge_reset_ae_dev(hdev->ae_dev); in hclge_reset_stack()
3833 hdev->rst_stats.reset_cnt++; in hclge_reset_prepare()
3850 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_rebuild()
3854 hdev->rst_stats.hw_reset_done_cnt++; in hclge_reset_rebuild()
3869 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 times */ in hclge_reset_rebuild()
3873 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) in hclge_reset_rebuild()
3890 hdev->last_reset_time = jiffies; in hclge_reset_rebuild()
3891 hdev->rst_stats.reset_fail_cnt = 0; in hclge_reset_rebuild()
3892 hdev->rst_stats.reset_done_cnt++; in hclge_reset_rebuild()
3893 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_rebuild()
3900 &hdev->default_reset_request); in hclge_reset_rebuild()
3902 set_bit(reset_level, &hdev->reset_request); in hclge_reset_rebuild()
3928 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_event()
3946 handle = &hdev->vport[0].nic; in hclge_reset_event()
3948 if (time_before(jiffies, (hdev->last_reset_time + in hclge_reset_event()
3950 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_reset_event()
3952 } else if (hdev->default_reset_request) { in hclge_reset_event()
3953 hdev->reset_level = in hclge_reset_event()
3955 &hdev->default_reset_request); in hclge_reset_event()
3956 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { in hclge_reset_event()
3957 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_reset_event()
3960 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", in hclge_reset_event()
3961 hdev->reset_level); in hclge_reset_event()
3964 set_bit(hdev->reset_level, &hdev->reset_request); in hclge_reset_event()
3967 if (hdev->reset_level < HNAE3_GLOBAL_RESET) in hclge_reset_event()
3968 hdev->reset_level++; in hclge_reset_event()
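The tail of hclge_reset_event() escalates: if the previous reset did not recover the device, the next request is bumped one level. This works because the HNAE3 reset enum orders levels from least to most disruptive, so "level + 1" is always a stronger reset. A compact model (the enum values are illustrative):

	#include <stdio.h>

	enum reset_level { NONE_RESET, FUNC_RESET, GLOBAL_RESET };

	static enum reset_level escalate(enum reset_level level)
	{
		return level < GLOBAL_RESET ? level + 1 : level;
	}

	int main(void)
	{
		enum reset_level lvl = FUNC_RESET;

		lvl = escalate(lvl);        /* FUNC -> GLOBAL */
		lvl = escalate(lvl);        /* stays GLOBAL */
		printf("%d\n", lvl);
		return 0;
	}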
3974 struct hclge_dev *hdev = ae_dev->priv; in hclge_set_def_reset_request()
3976 set_bit(rst_type, &hdev->default_reset_request); in hclge_set_def_reset_request()
3986 if (!hdev->default_reset_request) in hclge_reset_timer()
3989 dev_info(&hdev->pdev->dev, in hclge_reset_timer()
3991 hclge_reset_event(hdev->pdev, NULL); in hclge_reset_timer()
3996 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_subtask()
4004 * b. else, we can come back later to check this status so re-sched in hclge_reset_subtask()
4007 hdev->last_reset_time = jiffies; in hclge_reset_subtask()
4008 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); in hclge_reset_subtask()
4009 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4013 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_reset_subtask()
4014 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4017 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_subtask()
4022 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_service_task()
4025 down(&hdev->reset_sem); in hclge_reset_service_task()
4026 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4030 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4031 up(&hdev->reset_sem); in hclge_reset_service_task()
4039 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_update_vport_alive()
4040 struct hclge_vport *vport = &hdev->vport[i]; in hclge_update_vport_alive()
4042 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) in hclge_update_vport_alive()
4043 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_update_vport_alive()
4046 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) in hclge_update_vport_alive()
4047 vport->mps = HCLGE_MAC_DEFAULT_FRAME; in hclge_update_vport_alive()
4055 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_periodic_service_task()
4065 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { in hclge_periodic_service_task()
4066 delta = jiffies - hdev->last_serv_processed; in hclge_periodic_service_task()
4069 delta = round_jiffies_relative(HZ) - delta; in hclge_periodic_service_task()
4074 hdev->serv_processed_cnt++; in hclge_periodic_service_task()
4077 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { in hclge_periodic_service_task()
4078 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4082 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) in hclge_periodic_service_task()
4088 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) in hclge_periodic_service_task()
4091 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
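hclge_periodic_service_task() rate-limits itself to roughly once per second: when woken early it re-arms the delayed work for only the remaining time, otherwise it runs the stats/ARFS work and records the timestamp. The same arithmetic in user space, using milliseconds in place of jiffies (names are illustrative):

	#include <stdio.h>

	#define INTERVAL_MS 1000

	static unsigned long last_serv_processed;

	static unsigned long next_delay(unsigned long now_ms)
	{
		unsigned long delta = now_ms - last_serv_processed;

		if (delta < INTERVAL_MS)           /* woken early: sleep remainder */
			return INTERVAL_MS - delta;

		last_serv_processed = now_ms;      /* do the periodic work here */
		return INTERVAL_MS;
	}

	int main(void)
	{
		printf("%lu\n", next_delay(1500)); /* work done, full interval */
		printf("%lu\n", next_delay(1800)); /* early wake: 700ms remainder */
		return 0;
	}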
4117 if (!handle->client) in hclge_get_vport()
4119 else if (handle->client->type == HNAE3_CLIENT_ROCE) in hclge_get_vport()
4130 struct hclge_dev *hdev = vport->back; in hclge_get_vector()
4134 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); in hclge_get_vector()
4135 vector_num = min(hdev->num_msi_left, vector_num); in hclge_get_vector()
4138 for (i = 1; i < hdev->num_msi; i++) { in hclge_get_vector()
4139 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { in hclge_get_vector()
4140 vector->vector = pci_irq_vector(hdev->pdev, i); in hclge_get_vector()
4141 vector->io_addr = hdev->hw.io_base + in hclge_get_vector()
4143 (i - 1) * HCLGE_VECTOR_REG_OFFSET + in hclge_get_vector()
4144 vport->vport_id * in hclge_get_vector()
4146 hdev->vector_status[i] = vport->vport_id; in hclge_get_vector()
4147 hdev->vector_irq[i] = vector->vector; in hclge_get_vector()
4156 hdev->num_msi_left -= alloc; in hclge_get_vector()
4157 hdev->num_msi_used += alloc; in hclge_get_vector()
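hclge_get_vector() hands out MSI-X vectors by scanning vector_status[] for free slots (slot 0 is reserved for the misc vector) and derives each vector's doorbell address from its index. A simplified model of that bookkeeping; the table size, register stride, and helper name are assumptions:

	#include <stdio.h>

	#define NUM_MSI       8
	#define INVALID_VPORT 0xffff
	#define VECTOR_STRIDE 0x4      /* illustrative register stride */

	static unsigned short vector_status[NUM_MSI];

	static int get_vector(unsigned short vport_id, unsigned long *io_off)
	{
		int i;

		for (i = 1; i < NUM_MSI; i++) {  /* slot 0 is the misc vector */
			if (vector_status[i] == INVALID_VPORT) {
				vector_status[i] = vport_id;
				*io_off = (i - 1) * VECTOR_STRIDE;
				return i;
			}
		}
		return -1;
	}

	int main(void)
	{
		unsigned long off = 0;
		int i, v;

		for (i = 0; i < NUM_MSI; i++)
			vector_status[i] = INVALID_VPORT;
		v = get_vector(0, &off);
		printf("vector %d at offset 0x%lx\n", v, off);
		return 0;
	}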
4166 for (i = 0; i < hdev->num_msi; i++) in hclge_get_vector_index()
4167 if (vector == hdev->vector_irq[i]) in hclge_get_vector_index()
4170 return -EINVAL; in hclge_get_vector_index()
4176 struct hclge_dev *hdev = vport->back; in hclge_put_vector()
4181 dev_err(&hdev->pdev->dev, in hclge_put_vector()
4218 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); in hclge_set_rss_algo_key()
4219 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); in hclge_set_rss_algo_key()
4222 memcpy(req->hash_key, in hclge_set_rss_algo_key()
4225 key_counts -= key_size; in hclge_set_rss_algo_key()
4227 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_algo_key()
4229 dev_err(&hdev->pdev->dev, in hclge_set_rss_algo_key()
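hclge_set_rss_algo_key() streams the RSS hash key to the firmware in fixed-size pieces, one command descriptor per piece, each tagged with its byte offset. A sketch of the chunking arithmetic; the 40-byte key and 16-byte chunks mirror common hclge sizes but are assumptions here:

	#include <stdio.h>
	#include <string.h>

	#define KEY_SIZE   40
	#define CHUNK_SIZE 16

	static void program_key(const unsigned char *key)
	{
		int remaining = KEY_SIZE;
		int offset = 0;

		while (remaining > 0) {
			int len = remaining > CHUNK_SIZE ? CHUNK_SIZE : remaining;
			unsigned char desc[CHUNK_SIZE] = {0};

			memcpy(desc, key + offset, len);   /* req->hash_key */
			printf("desc: offset=%d len=%d\n", offset, len);
			offset += len;
			remaining -= len;
		}
	}

	int main(void)
	{
		unsigned char key[KEY_SIZE] = {0};

		program_key(key);    /* emits 3 descriptors: 16 + 16 + 8 bytes */
		return 0;
	}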
4251 req->start_table_index = in hclge_set_rss_indir_table()
4253 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); in hclge_set_rss_indir_table()
4256 req->rss_result[j] = in hclge_set_rss_indir_table()
4259 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_indir_table()
4261 dev_err(&hdev->pdev->dev, in hclge_set_rss_indir_table()
4290 req->rss_tc_mode[i] = cpu_to_le16(mode); in hclge_set_rss_tc_mode()
4293 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_tc_mode()
4295 dev_err(&hdev->pdev->dev, in hclge_set_rss_tc_mode()
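hclge_set_rss_indir_table() fills the indirection table a descriptor's worth of entries at a time; functionally the table simply maps the low bits of the packet hash to a queue. A toy model of that lookup (sizes are illustrative):

	#include <stdio.h>

	#define TABLE_SIZE 512
	#define NUM_QUEUES 16

	static unsigned char indir[TABLE_SIZE];

	int main(void)
	{
		unsigned int hash = 0x12345678;
		int i;

		for (i = 0; i < TABLE_SIZE; i++)
			indir[i] = i % NUM_QUEUES;   /* default even spread */
		printf("queue=%u\n", indir[hash % TABLE_SIZE]);
		return 0;
	}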
4303 if (vport->rss_tuple_sets.ipv4_tcp_en || in hclge_get_rss_type()
4304 vport->rss_tuple_sets.ipv4_udp_en || in hclge_get_rss_type()
4305 vport->rss_tuple_sets.ipv4_sctp_en || in hclge_get_rss_type()
4306 vport->rss_tuple_sets.ipv6_tcp_en || in hclge_get_rss_type()
4307 vport->rss_tuple_sets.ipv6_udp_en || in hclge_get_rss_type()
4308 vport->rss_tuple_sets.ipv6_sctp_en) in hclge_get_rss_type()
4309 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; in hclge_get_rss_type()
4310 else if (vport->rss_tuple_sets.ipv4_fragment_en || in hclge_get_rss_type()
4311 vport->rss_tuple_sets.ipv6_fragment_en) in hclge_get_rss_type()
4312 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; in hclge_get_rss_type()
4314 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; in hclge_get_rss_type()
4328 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; in hclge_set_rss_input_tuple()
4329 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; in hclge_set_rss_input_tuple()
4330 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; in hclge_set_rss_input_tuple()
4331 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; in hclge_set_rss_input_tuple()
4332 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; in hclge_set_rss_input_tuple()
4333 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; in hclge_set_rss_input_tuple()
4334 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; in hclge_set_rss_input_tuple()
4335 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; in hclge_set_rss_input_tuple()
4336 hclge_get_rss_type(&hdev->vport[0]); in hclge_set_rss_input_tuple()
4337 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_input_tuple()
4339 dev_err(&hdev->pdev->dev, in hclge_set_rss_input_tuple()
4352 switch (vport->rss_algo) { in hclge_get_rss()
4367 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); in hclge_get_rss()
4372 indir[i] = vport->rss_indirection_tbl[i]; in hclge_get_rss()
4381 struct hclge_dev *hdev = vport->back; in hclge_set_rss()
4395 hash_algo = vport->rss_algo; in hclge_set_rss()
4398 return -EINVAL; in hclge_set_rss()
4406 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); in hclge_set_rss()
4407 vport->rss_algo = hash_algo; in hclge_set_rss()
4412 vport->rss_indirection_tbl[i] = indir[i]; in hclge_set_rss()
4415 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); in hclge_set_rss()
4420 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; in hclge_get_rss_hash_bits()
4422 if (nfc->data & RXH_L4_B_2_3) in hclge_get_rss_hash_bits()
4427 if (nfc->data & RXH_IP_SRC) in hclge_get_rss_hash_bits()
4432 if (nfc->data & RXH_IP_DST) in hclge_get_rss_hash_bits()
4437 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) in hclge_get_rss_hash_bits()
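hclge_get_rss_hash_bits() translates ethtool's RXH_* request flags into the device's per-flow tuple-enable bits, with SCTP flows additionally hashing the verification tag. A reduced model of the flag mapping; every bit value below is illustrative, not the real RXH_* or HCLGE_* definition:

	#include <stdio.h>

	#define RXH_IP_SRC   0x1
	#define RXH_IP_DST   0x2
	#define RXH_L4_B_0_1 0x4
	#define RXH_L4_B_2_3 0x8

	#define S_PORT_BIT 0x01
	#define D_PORT_BIT 0x02
	#define S_IP_BIT   0x04
	#define D_IP_BIT   0x08

	static unsigned char rss_hash_bits(unsigned int data)
	{
		unsigned char sets = data & RXH_L4_B_0_1 ? S_PORT_BIT : 0;

		if (data & RXH_L4_B_2_3)
			sets |= D_PORT_BIT;
		if (data & RXH_IP_SRC)
			sets |= S_IP_BIT;
		if (data & RXH_IP_DST)
			sets |= D_IP_BIT;
		return sets;
	}

	int main(void)
	{
		printf("0x%x\n", rss_hash_bits(RXH_IP_SRC | RXH_L4_B_0_1));
		return 0;
	}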
4447 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tuple()
4453 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | in hclge_set_rss_tuple()
4455 return -EINVAL; in hclge_set_rss_tuple()
4460 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; in hclge_set_rss_tuple()
4461 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; in hclge_set_rss_tuple()
4462 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; in hclge_set_rss_tuple()
4463 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; in hclge_set_rss_tuple()
4464 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; in hclge_set_rss_tuple()
4465 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; in hclge_set_rss_tuple()
4466 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; in hclge_set_rss_tuple()
4467 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; in hclge_set_rss_tuple()
4470 switch (nfc->flow_type) { in hclge_set_rss_tuple()
4472 req->ipv4_tcp_en = tuple_sets; in hclge_set_rss_tuple()
4475 req->ipv6_tcp_en = tuple_sets; in hclge_set_rss_tuple()
4478 req->ipv4_udp_en = tuple_sets; in hclge_set_rss_tuple()
4481 req->ipv6_udp_en = tuple_sets; in hclge_set_rss_tuple()
4484 req->ipv4_sctp_en = tuple_sets; in hclge_set_rss_tuple()
4487 if ((nfc->data & RXH_L4_B_0_1) || in hclge_set_rss_tuple()
4488 (nfc->data & RXH_L4_B_2_3)) in hclge_set_rss_tuple()
4489 return -EINVAL; in hclge_set_rss_tuple()
4491 req->ipv6_sctp_en = tuple_sets; in hclge_set_rss_tuple()
4494 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; in hclge_set_rss_tuple()
4497 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; in hclge_set_rss_tuple()
4500 return -EINVAL; in hclge_set_rss_tuple()
4503 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_tuple()
4505 dev_err(&hdev->pdev->dev, in hclge_set_rss_tuple()
4510 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; in hclge_set_rss_tuple()
4511 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; in hclge_set_rss_tuple()
4512 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; in hclge_set_rss_tuple()
4513 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; in hclge_set_rss_tuple()
4514 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; in hclge_set_rss_tuple()
4515 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; in hclge_set_rss_tuple()
4516 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; in hclge_set_rss_tuple()
4517 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; in hclge_set_rss_tuple()
4528 nfc->data = 0; in hclge_get_rss_tuple()
4530 switch (nfc->flow_type) { in hclge_get_rss_tuple()
4532 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; in hclge_get_rss_tuple()
4535 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; in hclge_get_rss_tuple()
4538 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; in hclge_get_rss_tuple()
4541 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; in hclge_get_rss_tuple()
4544 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; in hclge_get_rss_tuple()
4547 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; in hclge_get_rss_tuple()
4554 return -EINVAL; in hclge_get_rss_tuple()
4561 nfc->data |= RXH_L4_B_2_3; in hclge_get_rss_tuple()
4563 nfc->data |= RXH_L4_B_0_1; in hclge_get_rss_tuple()
4565 nfc->data |= RXH_IP_DST; in hclge_get_rss_tuple()
4567 nfc->data |= RXH_IP_SRC; in hclge_get_rss_tuple()
4575 struct hclge_dev *hdev = vport->back; in hclge_get_tc_size()
4577 return hdev->rss_size_max; in hclge_get_tc_size()
4582 struct hclge_vport *vport = hdev->vport; in hclge_rss_init_hw()
4611 dev_err(&hdev->pdev->dev, in hclge_rss_init_hw()
4614 return -EINVAL; in hclge_rss_init_hw()
4623 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rss_init_hw()
4636 struct hclge_vport *vport = hdev->vport; in hclge_rss_indir_init_cfg()
4639 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { in hclge_rss_indir_init_cfg()
4649 struct hclge_vport *vport = hdev->vport; in hclge_rss_init_cfg()
4651 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) in hclge_rss_init_cfg()
4654 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_rss_init_cfg()
4685 struct hclge_dev *hdev = vport->back; in hclge_bind_ring_with_vector()
4697 req->int_vector_id = vector_id; in hclge_bind_ring_with_vector()
4700 for (node = ring_chain; node; node = node->next) { in hclge_bind_ring_with_vector()
4701 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); in hclge_bind_ring_with_vector()
4704 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); in hclge_bind_ring_with_vector()
4706 HCLGE_TQP_ID_S, node->tqp_index); in hclge_bind_ring_with_vector()
4709 hnae3_get_field(node->int_gl_idx, in hclge_bind_ring_with_vector()
4712 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); in hclge_bind_ring_with_vector()
4714 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; in hclge_bind_ring_with_vector()
4715 req->vfid = vport->vport_id; in hclge_bind_ring_with_vector()
4717 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
4719 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
4722 return -EIO; in hclge_bind_ring_with_vector()
4729 req->int_vector_id = vector_id; in hclge_bind_ring_with_vector()
4734 req->int_cause_num = i; in hclge_bind_ring_with_vector()
4735 req->vfid = vport->vport_id; in hclge_bind_ring_with_vector()
4736 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
4738 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
4740 return -EIO; in hclge_bind_ring_with_vector()
4751 struct hclge_dev *hdev = vport->back; in hclge_map_ring_to_vector()
4756 dev_err(&hdev->pdev->dev, in hclge_map_ring_to_vector()
4768 struct hclge_dev *hdev = vport->back; in hclge_unmap_ring_frm_vector()
4771 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_unmap_ring_frm_vector()
4776 dev_err(&handle->pdev->dev, in hclge_unmap_ring_frm_vector()
4783 dev_err(&handle->pdev->dev, in hclge_unmap_ring_frm_vector()
4800 req->vf_id = param->vf_id; in hclge_cmd_set_promisc_mode()
4807 req->flag = (param->enable << HCLGE_PROMISC_EN_B) | in hclge_cmd_set_promisc_mode()
4810 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_set_promisc_mode()
4812 dev_err(&hdev->pdev->dev, in hclge_cmd_set_promisc_mode()
4814 param->vf_id, ret); in hclge_cmd_set_promisc_mode()
4828 param->enable = HCLGE_PROMISC_EN_UC; in hclge_promisc_param_init()
4830 param->enable |= HCLGE_PROMISC_EN_MC; in hclge_promisc_param_init()
4832 param->enable |= HCLGE_PROMISC_EN_BC; in hclge_promisc_param_init()
4833 param->vf_id = vport_id; in hclge_promisc_param_init()
4839 struct hclge_dev *hdev = vport->back; in hclge_set_vport_promisc_mode()
4843 vport->vport_id); in hclge_set_vport_promisc_mode()
4851 struct hclge_dev *hdev = vport->back; in hclge_set_promisc_mode()
4858 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_promisc_mode()
4859 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; in hclge_set_promisc_mode()
4868 struct hclge_dev *hdev = vport->back; in hclge_request_update_promisc_mode()
4870 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); in hclge_request_update_promisc_mode()
4883 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_mode()
4885 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); in hclge_get_fd_mode()
4889 *fd_mode = req->mode; in hclge_get_fd_mode()
4908 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_allocation()
4910 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", in hclge_get_fd_allocation()
4915 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); in hclge_get_fd_allocation()
4916 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); in hclge_get_fd_allocation()
4917 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); in hclge_get_fd_allocation()
4918 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); in hclge_get_fd_allocation()
4934 stage = &hdev->fd_cfg.key_cfg[stage_num]; in hclge_set_fd_key_config()
4935 req->stage = stage_num; in hclge_set_fd_key_config()
4936 req->key_select = stage->key_sel; in hclge_set_fd_key_config()
4937 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; in hclge_set_fd_key_config()
4938 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; in hclge_set_fd_key_config()
4939 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; in hclge_set_fd_key_config()
4940 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; in hclge_set_fd_key_config()
4941 req->tuple_mask = cpu_to_le32(~stage->tuple_active); in hclge_set_fd_key_config()
4942 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); in hclge_set_fd_key_config()
4944 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fd_key_config()
4946 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); in hclge_set_fd_key_config()
4960 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
4964 switch (hdev->fd_cfg.fd_mode) { in hclge_init_fd_config()
4966 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; in hclge_init_fd_config()
4969 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; in hclge_init_fd_config()
4972 dev_err(&hdev->pdev->dev, in hclge_init_fd_config()
4974 hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
4975 return -EOPNOTSUPP; in hclge_init_fd_config()
4978 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; in hclge_init_fd_config()
4979 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; in hclge_init_fd_config()
4980 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; in hclge_init_fd_config()
4981 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; in hclge_init_fd_config()
4982 key_cfg->outer_sipv6_word_en = 0; in hclge_init_fd_config()
4983 key_cfg->outer_dipv6_word_en = 0; in hclge_init_fd_config()
4985 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | in hclge_init_fd_config()
4991 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) in hclge_init_fd_config()
4992 key_cfg->tuple_active |= in hclge_init_fd_config()
4998 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); in hclge_init_fd_config()
5001 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5002 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], in hclge_init_fd_config()
5003 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5004 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); in hclge_init_fd_config()
5030 req1->stage = stage; in hclge_fd_tcam_config()
5031 req1->xy_sel = sel_x ? 1 : 0; in hclge_fd_tcam_config()
5032 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); in hclge_fd_tcam_config()
5033 req1->index = cpu_to_le32(loc); in hclge_fd_tcam_config()
5034 req1->entry_vld = sel_x ? is_add : 0; in hclge_fd_tcam_config()
5037 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); in hclge_fd_tcam_config()
5038 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], in hclge_fd_tcam_config()
5039 sizeof(req2->tcam_data)); in hclge_fd_tcam_config()
5040 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + in hclge_fd_tcam_config()
5041 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); in hclge_fd_tcam_config()
5044 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_fd_tcam_config()
5046 dev_err(&hdev->pdev->dev, in hclge_fd_tcam_config()
5047 "config tcam key fail, ret=%d\n", in hclge_fd_tcam_config()
5064 req->index = cpu_to_le32(loc); in hclge_fd_ad_config()
5065 req->stage = stage; in hclge_fd_ad_config()
5068 action->write_rule_id_to_bd); in hclge_fd_ad_config()
5070 action->rule_id); in hclge_fd_ad_config()
5072 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); in hclge_fd_ad_config()
5074 action->forward_to_direct_queue); in hclge_fd_ad_config()
5076 action->queue_id); in hclge_fd_ad_config()
5077 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); in hclge_fd_ad_config()
5079 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); in hclge_fd_ad_config()
5080 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); in hclge_fd_ad_config()
5082 action->counter_id); in hclge_fd_ad_config()
5084 req->ad_data = cpu_to_le64(ad_data); in hclge_fd_ad_config()
5085 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_ad_config()
5087 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); in hclge_fd_ad_config()
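hclge_fd_ad_config() packs the rule's action (drop flag, direct-queue flag, queue id, counter id) into one 64-bit action-data word via bit-field helpers before sending it. A standalone model with an illustrative field layout:

	#include <stdint.h>
	#include <stdio.h>

	#define AD_DROP_B    0                  /* illustrative bit layout */
	#define AD_DIRECT_QB 1
	#define AD_QID_S     2
	#define AD_QID_M     (0x3ffULL << AD_QID_S)

	static uint64_t pack_action(int drop, int to_queue, unsigned int qid)
	{
		uint64_t ad = 0;

		ad |= (uint64_t)(drop != 0) << AD_DROP_B;
		ad |= (uint64_t)(to_queue != 0) << AD_DIRECT_QB;
		ad |= ((uint64_t)qid << AD_QID_S) & AD_QID_M;
		return ad;
	}

	int main(void)
	{
		printf("ad_data=0x%llx\n",
		       (unsigned long long)pack_action(0, 1, 5));
		return 0;
	}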
5099 if (rule->unused_tuple & tuple_bit) in hclge_fd_convert_tuple()
5105 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], in hclge_fd_convert_tuple()
5106 rule->tuples_mask.dst_mac[i]); in hclge_fd_convert_tuple()
5107 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], in hclge_fd_convert_tuple()
5108 rule->tuples_mask.dst_mac[i]); in hclge_fd_convert_tuple()
5114 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], in hclge_fd_convert_tuple()
5115 rule->tuples_mask.src_mac[i]); in hclge_fd_convert_tuple()
5116 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], in hclge_fd_convert_tuple()
5117 rule->tuples_mask.src_mac[i]); in hclge_fd_convert_tuple()
5122 calc_x(tmp_x_s, rule->tuples.vlan_tag1, in hclge_fd_convert_tuple()
5123 rule->tuples_mask.vlan_tag1); in hclge_fd_convert_tuple()
5124 calc_y(tmp_y_s, rule->tuples.vlan_tag1, in hclge_fd_convert_tuple()
5125 rule->tuples_mask.vlan_tag1); in hclge_fd_convert_tuple()
5131 calc_x(tmp_x_s, rule->tuples.ether_proto, in hclge_fd_convert_tuple()
5132 rule->tuples_mask.ether_proto); in hclge_fd_convert_tuple()
5133 calc_y(tmp_y_s, rule->tuples.ether_proto, in hclge_fd_convert_tuple()
5134 rule->tuples_mask.ether_proto); in hclge_fd_convert_tuple()
5140 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); in hclge_fd_convert_tuple()
5141 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); in hclge_fd_convert_tuple()
5145 calc_x(*key_x, rule->tuples.ip_proto, in hclge_fd_convert_tuple()
5146 rule->tuples_mask.ip_proto); in hclge_fd_convert_tuple()
5147 calc_y(*key_y, rule->tuples.ip_proto, in hclge_fd_convert_tuple()
5148 rule->tuples_mask.ip_proto); in hclge_fd_convert_tuple()
5152 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX], in hclge_fd_convert_tuple()
5153 rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_convert_tuple()
5154 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX], in hclge_fd_convert_tuple()
5155 rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_convert_tuple()
5161 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX], in hclge_fd_convert_tuple()
5162 rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_convert_tuple()
5163 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX], in hclge_fd_convert_tuple()
5164 rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_convert_tuple()
5170 calc_x(tmp_x_s, rule->tuples.src_port, in hclge_fd_convert_tuple()
5171 rule->tuples_mask.src_port); in hclge_fd_convert_tuple()
5172 calc_y(tmp_y_s, rule->tuples.src_port, in hclge_fd_convert_tuple()
5173 rule->tuples_mask.src_port); in hclge_fd_convert_tuple()
5179 calc_x(tmp_x_s, rule->tuples.dst_port, in hclge_fd_convert_tuple()
5180 rule->tuples_mask.dst_port); in hclge_fd_convert_tuple()
5181 calc_y(tmp_y_s, rule->tuples.dst_port, in hclge_fd_convert_tuple()
5182 rule->tuples_mask.dst_port); in hclge_fd_convert_tuple()
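Every tuple case above feeds a value/mask pair into calc_x()/calc_y(), the usual x/y encoding for programming TCAM match entries (the macros themselves are defined earlier in the file and do not appear in this listing). A common convention, given here only as an assumption about their behavior, is x = ~data & mask and y = data & mask, so a masked-out bit yields x = y = 0, i.e. "don't care":

	#include <stdio.h>

	static void calc_xy(unsigned char data, unsigned char mask,
			    unsigned char *x, unsigned char *y)
	{
		*x = ~data & mask;   /* mask bit 0 -> x=y=0: "don't care" */
		*y =  data & mask;
	}

	int main(void)
	{
		unsigned char x, y;

		calc_xy(0xAB, 0xF0, &x, &y);   /* match only the high nibble */
		printf("x=0x%02x y=0x%02x\n", x, y);
		return 0;
	}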
5222 tuple_bit = key_cfg->meta_data_active & BIT(i); in hclge_fd_convert_meta_data()
5231 rule->vf_id, 0); in hclge_fd_convert_meta_data()
5244 shift_bits = sizeof(meta_data) * 8 - cur_pos; in hclge_fd_convert_meta_data()
5257 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; in hclge_config_key()
5275 check_tuple = key_cfg->tuple_active & BIT(i); in hclge_config_key()
5285 meta_data_region = hdev->fd_cfg.max_key_length / 8 - in hclge_config_key()
5293 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, in hclge_config_key()
5296 dev_err(&hdev->pdev->dev, in hclge_config_key()
5298 rule->queue_id, ret); in hclge_config_key()
5302 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, in hclge_config_key()
5305 dev_err(&hdev->pdev->dev, in hclge_config_key()
5307 rule->queue_id, ret); in hclge_config_key()
5316 ad_data.ad_id = rule->location; in hclge_config_action()
5318 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { in hclge_config_action()
5325 ad_data.queue_id = rule->queue_id; in hclge_config_action()
5335 ad_data.rule_id = rule->location; in hclge_config_action()
5344 return -EINVAL; in hclge_fd_check_tcpip4_tuple()
5348 if (!spec->ip4src) in hclge_fd_check_tcpip4_tuple()
5351 if (!spec->ip4dst) in hclge_fd_check_tcpip4_tuple()
5354 if (!spec->psrc) in hclge_fd_check_tcpip4_tuple()
5357 if (!spec->pdst) in hclge_fd_check_tcpip4_tuple()
5360 if (!spec->tos) in hclge_fd_check_tcpip4_tuple()
5370 return -EINVAL; in hclge_fd_check_ip4_tuple()
5375 if (!spec->ip4src) in hclge_fd_check_ip4_tuple()
5378 if (!spec->ip4dst) in hclge_fd_check_ip4_tuple()
5381 if (!spec->tos) in hclge_fd_check_ip4_tuple()
5384 if (!spec->proto) in hclge_fd_check_ip4_tuple()
5387 if (spec->l4_4_bytes) in hclge_fd_check_ip4_tuple()
5388 return -EOPNOTSUPP; in hclge_fd_check_ip4_tuple()
5390 if (spec->ip_ver != ETH_RX_NFC_IP4) in hclge_fd_check_ip4_tuple()
5391 return -EOPNOTSUPP; in hclge_fd_check_ip4_tuple()
5400 return -EINVAL; in hclge_fd_check_tcpip6_tuple()
5406 if (!spec->ip6src[0] && !spec->ip6src[1] && in hclge_fd_check_tcpip6_tuple()
5407 !spec->ip6src[2] && !spec->ip6src[3]) in hclge_fd_check_tcpip6_tuple()
5410 if (!spec->ip6dst[0] && !spec->ip6dst[1] && in hclge_fd_check_tcpip6_tuple()
5411 !spec->ip6dst[2] && !spec->ip6dst[3]) in hclge_fd_check_tcpip6_tuple()
5414 if (!spec->psrc) in hclge_fd_check_tcpip6_tuple()
5417 if (!spec->pdst) in hclge_fd_check_tcpip6_tuple()
5420 if (spec->tclass) in hclge_fd_check_tcpip6_tuple()
5421 return -EOPNOTSUPP; in hclge_fd_check_tcpip6_tuple()
5430 return -EINVAL; in hclge_fd_check_ip6_tuple()
5436 if (!spec->ip6src[0] && !spec->ip6src[1] && in hclge_fd_check_ip6_tuple()
5437 !spec->ip6src[2] && !spec->ip6src[3]) in hclge_fd_check_ip6_tuple()
5440 if (!spec->ip6dst[0] && !spec->ip6dst[1] && in hclge_fd_check_ip6_tuple()
5441 !spec->ip6dst[2] && !spec->ip6dst[3]) in hclge_fd_check_ip6_tuple()
5444 if (!spec->l4_proto) in hclge_fd_check_ip6_tuple()
5447 if (spec->tclass) in hclge_fd_check_ip6_tuple()
5448 return -EOPNOTSUPP; in hclge_fd_check_ip6_tuple()
5450 if (spec->l4_4_bytes) in hclge_fd_check_ip6_tuple()
5451 return -EOPNOTSUPP; in hclge_fd_check_ip6_tuple()
5459 return -EINVAL; in hclge_fd_check_ether_tuple()
5465 if (is_zero_ether_addr(spec->h_source)) in hclge_fd_check_ether_tuple()
5468 if (is_zero_ether_addr(spec->h_dest)) in hclge_fd_check_ether_tuple()
5471 if (!spec->h_proto) in hclge_fd_check_ether_tuple()
5481 if (fs->flow_type & FLOW_EXT) { in hclge_fd_check_ext_tuple()
5482 if (fs->h_ext.vlan_etype) { in hclge_fd_check_ext_tuple()
5483 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); in hclge_fd_check_ext_tuple()
5484 return -EOPNOTSUPP; in hclge_fd_check_ext_tuple()
5487 if (!fs->h_ext.vlan_tci) in hclge_fd_check_ext_tuple()
5490 if (fs->m_ext.vlan_tci && in hclge_fd_check_ext_tuple()
5491 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { in hclge_fd_check_ext_tuple()
5492 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
5494 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); in hclge_fd_check_ext_tuple()
5495 return -EINVAL; in hclge_fd_check_ext_tuple()
5501 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_check_ext_tuple()
5502 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_ext_tuple()
5504 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
5506 return -EOPNOTSUPP; in hclge_fd_check_ext_tuple()
5509 if (is_zero_ether_addr(fs->h_ext.h_dest)) in hclge_fd_check_ext_tuple()
5525 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_fd_check_spec()
5526 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
5528 fs->location, in hclge_fd_check_spec()
5529 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); in hclge_fd_check_spec()
5530 return -EINVAL; in hclge_fd_check_spec()
5533 if ((fs->flow_type & FLOW_EXT) && in hclge_fd_check_spec()
5534 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { in hclge_fd_check_spec()
5535 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); in hclge_fd_check_spec()
5536 return -EOPNOTSUPP; in hclge_fd_check_spec()
5539 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_check_spec()
5544 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, in hclge_fd_check_spec()
5548 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, in hclge_fd_check_spec()
5554 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, in hclge_fd_check_spec()
5558 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, in hclge_fd_check_spec()
5562 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_spec()
5564 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
5566 return -EOPNOTSUPP; in hclge_fd_check_spec()
5569 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, in hclge_fd_check_spec()
5573 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
5576 return -EOPNOTSUPP; in hclge_fd_check_spec()
5580 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
5594 spin_lock_bh(&hdev->fd_rule_lock); in hclge_fd_rule_exist()
5595 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_fd_rule_exist()
5596 if (rule->location >= location) in hclge_fd_rule_exist()
5600 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_fd_rule_exist()
5602 return rule && rule->location == location; in hclge_fd_rule_exist()
5615 return -EINVAL; in hclge_fd_update_rule_list()
5618 &hdev->fd_rule_list, rule_node) { in hclge_fd_update_rule_list()
5619 if (rule->location >= location) in hclge_fd_update_rule_list()
5624 if (rule && rule->location == location) { in hclge_fd_update_rule_list()
5625 hlist_del(&rule->rule_node); in hclge_fd_update_rule_list()
5627 hdev->hclge_fd_rule_num--; in hclge_fd_update_rule_list()
5630 if (!hdev->hclge_fd_rule_num) in hclge_fd_update_rule_list()
5631 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_fd_update_rule_list()
5632 clear_bit(location, hdev->fd_bmap); in hclge_fd_update_rule_list()
5637 dev_err(&hdev->pdev->dev, in hclge_fd_update_rule_list()
5640 return -EINVAL; in hclge_fd_update_rule_list()
5643 INIT_HLIST_NODE(&new_rule->rule_node); in hclge_fd_update_rule_list()
5646 hlist_add_behind(&new_rule->rule_node, &parent->rule_node); in hclge_fd_update_rule_list()
5648 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); in hclge_fd_update_rule_list()
5650 set_bit(location, hdev->fd_bmap); in hclge_fd_update_rule_list()
5651 hdev->hclge_fd_rule_num++; in hclge_fd_update_rule_list()
5652 hdev->fd_active_type = new_rule->rule_type; in hclge_fd_update_rule_list()
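hclge_fd_update_rule_list() keeps fd_rule_list sorted by location: it walks to the first node whose location is not below the new rule's, unlinks an existing entry at the same slot, and links the new rule behind its predecessor or at the head. The same idea with a plain singly-linked list:

	#include <stdio.h>

	struct rule {
		unsigned int location;
		struct rule *next;
	};

	static void insert_sorted(struct rule **head, struct rule *new)
	{
		struct rule **pos = head;

		while (*pos && (*pos)->location < new->location)
			pos = &(*pos)->next;
		new->next = *pos;
		*pos = new;
	}

	int main(void)
	{
		struct rule a = { 5, NULL }, b = { 2, NULL }, c = { 9, NULL };
		struct rule *head = NULL, *r;

		insert_sorted(&head, &a);
		insert_sorted(&head, &b);
		insert_sorted(&head, &c);
		for (r = head; r; r = r->next)
			printf("%u ", r->location);   /* prints: 2 5 9 */
		printf("\n");
		return 0;
	}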
5661 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); in hclge_fd_get_tuple()
5667 rule->tuples.src_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5668 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); in hclge_fd_get_tuple()
5669 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5670 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); in hclge_fd_get_tuple()
5672 rule->tuples.dst_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5673 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); in hclge_fd_get_tuple()
5674 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5675 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); in hclge_fd_get_tuple()
5677 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); in hclge_fd_get_tuple()
5678 rule->tuples_mask.src_port = in hclge_fd_get_tuple()
5679 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); in hclge_fd_get_tuple()
5681 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); in hclge_fd_get_tuple()
5682 rule->tuples_mask.dst_port = in hclge_fd_get_tuple()
5683 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); in hclge_fd_get_tuple()
5685 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; in hclge_fd_get_tuple()
5686 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; in hclge_fd_get_tuple()
5688 rule->tuples.ether_proto = ETH_P_IP; in hclge_fd_get_tuple()
5689 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tuple()
5693 rule->tuples.src_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5694 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); in hclge_fd_get_tuple()
5695 rule->tuples_mask.src_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5696 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); in hclge_fd_get_tuple()
5698 rule->tuples.dst_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5699 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); in hclge_fd_get_tuple()
5700 rule->tuples_mask.dst_ip[IPV4_INDEX] = in hclge_fd_get_tuple()
5701 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); in hclge_fd_get_tuple()
5703 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; in hclge_fd_get_tuple()
5704 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; in hclge_fd_get_tuple()
5706 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; in hclge_fd_get_tuple()
5707 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; in hclge_fd_get_tuple()
5709 rule->tuples.ether_proto = ETH_P_IP; in hclge_fd_get_tuple()
5710 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tuple()
5716 be32_to_cpu_array(rule->tuples.src_ip, in hclge_fd_get_tuple()
5717 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE); in hclge_fd_get_tuple()
5718 be32_to_cpu_array(rule->tuples_mask.src_ip, in hclge_fd_get_tuple()
5719 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE); in hclge_fd_get_tuple()
5721 be32_to_cpu_array(rule->tuples.dst_ip, in hclge_fd_get_tuple()
5722 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); in hclge_fd_get_tuple()
5723 be32_to_cpu_array(rule->tuples_mask.dst_ip, in hclge_fd_get_tuple()
5724 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); in hclge_fd_get_tuple()
5726 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); in hclge_fd_get_tuple()
5727 rule->tuples_mask.src_port = in hclge_fd_get_tuple()
5728 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); in hclge_fd_get_tuple()
5730 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); in hclge_fd_get_tuple()
5731 rule->tuples_mask.dst_port = in hclge_fd_get_tuple()
5732 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); in hclge_fd_get_tuple()
5734 rule->tuples.ether_proto = ETH_P_IPV6; in hclge_fd_get_tuple()
5735 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tuple()
5739 be32_to_cpu_array(rule->tuples.src_ip, in hclge_fd_get_tuple()
5740 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE); in hclge_fd_get_tuple()
5741 be32_to_cpu_array(rule->tuples_mask.src_ip, in hclge_fd_get_tuple()
5742 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE); in hclge_fd_get_tuple()
5744 be32_to_cpu_array(rule->tuples.dst_ip, in hclge_fd_get_tuple()
5745 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE); in hclge_fd_get_tuple()
5746 be32_to_cpu_array(rule->tuples_mask.dst_ip, in hclge_fd_get_tuple()
5747 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE); in hclge_fd_get_tuple()
5749 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; in hclge_fd_get_tuple()
5750 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; in hclge_fd_get_tuple()
5752 rule->tuples.ether_proto = ETH_P_IPV6; in hclge_fd_get_tuple()
5753 rule->tuples_mask.ether_proto = 0xFFFF; in hclge_fd_get_tuple()
5757 ether_addr_copy(rule->tuples.src_mac, in hclge_fd_get_tuple()
5758 fs->h_u.ether_spec.h_source); in hclge_fd_get_tuple()
5759 ether_addr_copy(rule->tuples_mask.src_mac, in hclge_fd_get_tuple()
5760 fs->m_u.ether_spec.h_source); in hclge_fd_get_tuple()
5762 ether_addr_copy(rule->tuples.dst_mac, in hclge_fd_get_tuple()
5763 fs->h_u.ether_spec.h_dest); in hclge_fd_get_tuple()
5764 ether_addr_copy(rule->tuples_mask.dst_mac, in hclge_fd_get_tuple()
5765 fs->m_u.ether_spec.h_dest); in hclge_fd_get_tuple()
5767 rule->tuples.ether_proto = in hclge_fd_get_tuple()
5768 be16_to_cpu(fs->h_u.ether_spec.h_proto); in hclge_fd_get_tuple()
5769 rule->tuples_mask.ether_proto = in hclge_fd_get_tuple()
5770 be16_to_cpu(fs->m_u.ether_spec.h_proto); in hclge_fd_get_tuple()
5774 return -EOPNOTSUPP; in hclge_fd_get_tuple()
5780 rule->tuples.ip_proto = IPPROTO_SCTP; in hclge_fd_get_tuple()
5781 rule->tuples_mask.ip_proto = 0xFF; in hclge_fd_get_tuple()
5785 rule->tuples.ip_proto = IPPROTO_TCP; in hclge_fd_get_tuple()
5786 rule->tuples_mask.ip_proto = 0xFF; in hclge_fd_get_tuple()
5790 rule->tuples.ip_proto = IPPROTO_UDP; in hclge_fd_get_tuple()
5791 rule->tuples_mask.ip_proto = 0xFF; in hclge_fd_get_tuple()
5797 if (fs->flow_type & FLOW_EXT) { in hclge_fd_get_tuple()
5798 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); in hclge_fd_get_tuple()
5799 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); in hclge_fd_get_tuple()
5802 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_get_tuple()
5803 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); in hclge_fd_get_tuple()
5804 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); in hclge_fd_get_tuple()
5817 dev_err(&hdev->pdev->dev, in hclge_fd_config_rule()
5819 return -EINVAL; in hclge_fd_config_rule()
5823 hclge_fd_update_rule_list(hdev, rule, rule->location, true); in hclge_fd_config_rule()
5836 hclge_fd_update_rule_list(hdev, rule, rule->location, false); in hclge_fd_config_rule()
5844 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry()
5853 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
5855 return -EOPNOTSUPP; in hclge_add_fd_entry()
5858 if (!hdev->fd_en) { in hclge_add_fd_entry()
5859 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
5861 return -EOPNOTSUPP; in hclge_add_fd_entry()
5864 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_add_fd_entry()
5870 if (fs->ring_cookie == RX_CLS_FLOW_DISC) { in hclge_add_fd_entry()
5873 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); in hclge_add_fd_entry()
5874 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); in hclge_add_fd_entry()
5877 if (vf > hdev->num_req_vfs) { in hclge_add_fd_entry()
5878 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
5880 vf, hdev->num_req_vfs); in hclge_add_fd_entry()
5881 return -EINVAL; in hclge_add_fd_entry()
5884 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; in hclge_add_fd_entry()
5885 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; in hclge_add_fd_entry()
5888 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
5890 ring, tqps - 1); in hclge_add_fd_entry()
5891 return -EINVAL; in hclge_add_fd_entry()
5900 return -ENOMEM; in hclge_add_fd_entry()
5908 rule->flow_type = fs->flow_type; in hclge_add_fd_entry()
5909 rule->location = fs->location; in hclge_add_fd_entry()
5910 rule->unused_tuple = unused; in hclge_add_fd_entry()
5911 rule->vf_id = dst_vport_id; in hclge_add_fd_entry()
5912 rule->queue_id = q_index; in hclge_add_fd_entry()
5913 rule->action = action; in hclge_add_fd_entry()
5914 rule->rule_type = HCLGE_FD_EP_ACTIVE; in hclge_add_fd_entry()
5919 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry()
5924 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry()
5933 struct hclge_dev *hdev = vport->back; in hclge_del_fd_entry()
5938 return -EOPNOTSUPP; in hclge_del_fd_entry()
5940 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_del_fd_entry()
5942 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_fd_entry()
5943 return -EINVAL; in hclge_del_fd_entry()
5945 if (!hclge_fd_rule_exist(hdev, fs->location)) { in hclge_del_fd_entry()
5946 dev_err(&hdev->pdev->dev, in hclge_del_fd_entry()
5947 "Delete fail, rule %u is inexistent\n", fs->location); in hclge_del_fd_entry()
5948 return -ENOENT; in hclge_del_fd_entry()
5951 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, in hclge_del_fd_entry()
5956 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
5957 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false); in hclge_del_fd_entry()
5959 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
5969 struct hclge_dev *hdev = vport->back; in hclge_del_all_fd_entries()
5977 for_each_set_bit(location, hdev->fd_bmap, in hclge_del_all_fd_entries()
5978 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_all_fd_entries()
5983 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, in hclge_del_all_fd_entries()
5985 hlist_del(&rule->rule_node); in hclge_del_all_fd_entries()
5988 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_del_all_fd_entries()
5989 hdev->hclge_fd_rule_num = 0; in hclge_del_all_fd_entries()
5990 bitmap_zero(hdev->fd_bmap, in hclge_del_all_fd_entries()
5991 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_del_all_fd_entries()
5998 struct hclge_dev *hdev = vport->back; in hclge_restore_fd_entries()
6011 if (!hdev->fd_en) in hclge_restore_fd_entries()
6014 spin_lock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6015 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_restore_fd_entries()
6021 dev_warn(&hdev->pdev->dev, in hclge_restore_fd_entries()
6023 rule->location); in hclge_restore_fd_entries()
6024 clear_bit(rule->location, hdev->fd_bmap); in hclge_restore_fd_entries()
6025 hlist_del(&rule->rule_node); in hclge_restore_fd_entries()
6027 hdev->hclge_fd_rule_num--; in hclge_restore_fd_entries()
6031 if (hdev->hclge_fd_rule_num) in hclge_restore_fd_entries()
6032 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE; in hclge_restore_fd_entries()
6034 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6043 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_cnt()
6046 return -EOPNOTSUPP; in hclge_get_fd_rule_cnt()
6048 cmd->rule_cnt = hdev->hclge_fd_rule_num; in hclge_get_fd_rule_cnt()
6049 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_fd_rule_cnt()
6058 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6059 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? in hclge_fd_get_tcpip4_info()
6060 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6062 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6063 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? in hclge_fd_get_tcpip4_info()
6064 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_get_tcpip4_info()
6066 spec->psrc = cpu_to_be16(rule->tuples.src_port); in hclge_fd_get_tcpip4_info()
6067 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? in hclge_fd_get_tcpip4_info()
6068 0 : cpu_to_be16(rule->tuples_mask.src_port); in hclge_fd_get_tcpip4_info()
6070 spec->pdst = cpu_to_be16(rule->tuples.dst_port); in hclge_fd_get_tcpip4_info()
6071 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? in hclge_fd_get_tcpip4_info()
6072 0 : cpu_to_be16(rule->tuples_mask.dst_port); in hclge_fd_get_tcpip4_info()
6074 spec->tos = rule->tuples.ip_tos; in hclge_fd_get_tcpip4_info()
6075 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_tcpip4_info()
6076 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_tcpip4_info()
6083 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6084 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? in hclge_fd_get_ip4_info()
6085 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6087 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6088 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? in hclge_fd_get_ip4_info()
6089 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); in hclge_fd_get_ip4_info()
6091 spec->tos = rule->tuples.ip_tos; in hclge_fd_get_ip4_info()
6092 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? in hclge_fd_get_ip4_info()
6093 0 : rule->tuples_mask.ip_tos; in hclge_fd_get_ip4_info()
6095 spec->proto = rule->tuples.ip_proto; in hclge_fd_get_ip4_info()
6096 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? in hclge_fd_get_ip4_info()
6097 0 : rule->tuples_mask.ip_proto; in hclge_fd_get_ip4_info()
6099 spec->ip_ver = ETH_RX_NFC_IP4; in hclge_fd_get_ip4_info()
6106 cpu_to_be32_array(spec->ip6src, in hclge_fd_get_tcpip6_info()
6107 rule->tuples.src_ip, IPV6_SIZE); in hclge_fd_get_tcpip6_info()
6108 cpu_to_be32_array(spec->ip6dst, in hclge_fd_get_tcpip6_info()
6109 rule->tuples.dst_ip, IPV6_SIZE); in hclge_fd_get_tcpip6_info()
6110 if (rule->unused_tuple & BIT(INNER_SRC_IP)) in hclge_fd_get_tcpip6_info()
6111 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); in hclge_fd_get_tcpip6_info()
6113 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, in hclge_fd_get_tcpip6_info()
6116 if (rule->unused_tuple & BIT(INNER_DST_IP)) in hclge_fd_get_tcpip6_info()
6117 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); in hclge_fd_get_tcpip6_info()
6119 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, in hclge_fd_get_tcpip6_info()
6122 spec->psrc = cpu_to_be16(rule->tuples.src_port); in hclge_fd_get_tcpip6_info()
6123 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? in hclge_fd_get_tcpip6_info()
6124 0 : cpu_to_be16(rule->tuples_mask.src_port); in hclge_fd_get_tcpip6_info()
6126 spec->pdst = cpu_to_be16(rule->tuples.dst_port); in hclge_fd_get_tcpip6_info()
6127 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? in hclge_fd_get_tcpip6_info()
6128 0 : cpu_to_be16(rule->tuples_mask.dst_port); in hclge_fd_get_tcpip6_info()
6135 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6136 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6137 if (rule->unused_tuple & BIT(INNER_SRC_IP)) in hclge_fd_get_ip6_info()
6138 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); in hclge_fd_get_ip6_info()
6140 cpu_to_be32_array(spec_mask->ip6src, in hclge_fd_get_ip6_info()
6141 rule->tuples_mask.src_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6143 if (rule->unused_tuple & BIT(INNER_DST_IP)) in hclge_fd_get_ip6_info()
6144 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); in hclge_fd_get_ip6_info()
6146 cpu_to_be32_array(spec_mask->ip6dst, in hclge_fd_get_ip6_info()
6147 rule->tuples_mask.dst_ip, IPV6_SIZE); in hclge_fd_get_ip6_info()
6149 spec->l4_proto = rule->tuples.ip_proto; in hclge_fd_get_ip6_info()
6150 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? in hclge_fd_get_ip6_info()
6151 0 : rule->tuples_mask.ip_proto; in hclge_fd_get_ip6_info()
6158 ether_addr_copy(spec->h_source, rule->tuples.src_mac); in hclge_fd_get_ether_info()
6159 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); in hclge_fd_get_ether_info()
6161 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) in hclge_fd_get_ether_info()
6162 eth_zero_addr(spec_mask->h_source); in hclge_fd_get_ether_info()
6164 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); in hclge_fd_get_ether_info()
6166 if (rule->unused_tuple & BIT(INNER_DST_MAC)) in hclge_fd_get_ether_info()
6167 eth_zero_addr(spec_mask->h_dest); in hclge_fd_get_ether_info()
6169 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); in hclge_fd_get_ether_info()
6171 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); in hclge_fd_get_ether_info()
6172 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? in hclge_fd_get_ether_info()
6173 0 : cpu_to_be16(rule->tuples_mask.ether_proto); in hclge_fd_get_ether_info()
6179 if (fs->flow_type & FLOW_EXT) { in hclge_fd_get_ext_info()
6180 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); in hclge_fd_get_ext_info()
6181 fs->m_ext.vlan_tci = in hclge_fd_get_ext_info()
6182 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? in hclge_fd_get_ext_info()
6184 cpu_to_be16(rule->tuples_mask.vlan_tag1); in hclge_fd_get_ext_info()
6187 if (fs->flow_type & FLOW_MAC_EXT) { in hclge_fd_get_ext_info()
6188 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); in hclge_fd_get_ext_info()
6189 if (rule->unused_tuple & BIT(INNER_DST_MAC)) in hclge_fd_get_ext_info()
6190 eth_zero_addr(fs->m_ext.h_dest); in hclge_fd_get_ext_info()
6192 ether_addr_copy(fs->m_ext.h_dest, in hclge_fd_get_ext_info()
6193 rule->tuples_mask.dst_mac); in hclge_fd_get_ext_info()
6202 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_info()
6207 return -EOPNOTSUPP; in hclge_get_fd_rule_info()
6209 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; in hclge_get_fd_rule_info()
6211 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6213 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_get_fd_rule_info()
6214 if (rule->location >= fs->location) in hclge_get_fd_rule_info()
6218 if (!rule || fs->location != rule->location) { in hclge_get_fd_rule_info()
6219 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6221 return -ENOENT; in hclge_get_fd_rule_info()
6224 fs->flow_type = rule->flow_type; in hclge_get_fd_rule_info()
6225 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { in hclge_get_fd_rule_info()
6229 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, in hclge_get_fd_rule_info()
6230 &fs->m_u.tcp_ip4_spec); in hclge_get_fd_rule_info()
6233 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, in hclge_get_fd_rule_info()
6234 &fs->m_u.usr_ip4_spec); in hclge_get_fd_rule_info()
6239 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, in hclge_get_fd_rule_info()
6240 &fs->m_u.tcp_ip6_spec); in hclge_get_fd_rule_info()
6243 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, in hclge_get_fd_rule_info()
6244 &fs->m_u.usr_ip6_spec); in hclge_get_fd_rule_info()
6251 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, in hclge_get_fd_rule_info()
6252 &fs->m_u.ether_spec); in hclge_get_fd_rule_info()
6258 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { in hclge_get_fd_rule_info()
6259 fs->ring_cookie = RX_CLS_FLOW_DISC; in hclge_get_fd_rule_info()
6263 fs->ring_cookie = rule->queue_id; in hclge_get_fd_rule_info()
6264 vf_id = rule->vf_id; in hclge_get_fd_rule_info()
6266 fs->ring_cookie |= vf_id; in hclge_get_fd_rule_info()
6269 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6278 struct hclge_dev *hdev = vport->back; in hclge_get_all_rules()
6284 return -EOPNOTSUPP; in hclge_get_all_rules()
6286 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_all_rules()
6288 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6290 &hdev->fd_rule_list, rule_node) { in hclge_get_all_rules()
6291 if (cnt == cmd->rule_cnt) { in hclge_get_all_rules()
6292 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6293 return -EMSGSIZE; in hclge_get_all_rules()
6296 rule_locs[cnt] = rule->location; in hclge_get_all_rules()
6300 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6302 cmd->rule_cnt = cnt; in hclge_get_all_rules()
6310 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 in hclge_fd_get_flow_tuples()
6311 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 in hclge_fd_get_flow_tuples()
6313 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); in hclge_fd_get_flow_tuples()
6314 tuples->ip_proto = fkeys->basic.ip_proto; in hclge_fd_get_flow_tuples()
6315 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); in hclge_fd_get_flow_tuples()
6317 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { in hclge_fd_get_flow_tuples()
6318 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); in hclge_fd_get_flow_tuples()
6319 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); in hclge_fd_get_flow_tuples()
6324 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); in hclge_fd_get_flow_tuples()
6325 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); in hclge_fd_get_flow_tuples()
6338 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_fd_search_flow_keys()
6339 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) in hclge_fd_search_flow_keys()
6349 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | in hclge_fd_build_arfs_rule()
6352 rule->action = 0; in hclge_fd_build_arfs_rule()
6353 rule->vf_id = 0; in hclge_fd_build_arfs_rule()
6354 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; in hclge_fd_build_arfs_rule()
6355 if (tuples->ether_proto == ETH_P_IP) { in hclge_fd_build_arfs_rule()
6356 if (tuples->ip_proto == IPPROTO_TCP) in hclge_fd_build_arfs_rule()
6357 rule->flow_type = TCP_V4_FLOW; in hclge_fd_build_arfs_rule()
6359 rule->flow_type = UDP_V4_FLOW; in hclge_fd_build_arfs_rule()
6361 if (tuples->ip_proto == IPPROTO_TCP) in hclge_fd_build_arfs_rule()
6362 rule->flow_type = TCP_V6_FLOW; in hclge_fd_build_arfs_rule()
6364 rule->flow_type = UDP_V6_FLOW; in hclge_fd_build_arfs_rule()
6366 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); in hclge_fd_build_arfs_rule()
6367 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); in hclge_fd_build_arfs_rule()
6375 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry_by_arfs()
6382 return -EOPNOTSUPP; in hclge_add_fd_entry_by_arfs()
6387 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6388 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { in hclge_add_fd_entry_by_arfs()
6389 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6390 return -EOPNOTSUPP; in hclge_add_fd_entry_by_arfs()
6402 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); in hclge_add_fd_entry_by_arfs()
6403 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_add_fd_entry_by_arfs()
6404 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6405 return -ENOSPC; in hclge_add_fd_entry_by_arfs()
6410 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6411 return -ENOMEM; in hclge_add_fd_entry_by_arfs()
6414 set_bit(bit_id, hdev->fd_bmap); in hclge_add_fd_entry_by_arfs()
6415 rule->location = bit_id; in hclge_add_fd_entry_by_arfs()
6416 rule->flow_id = flow_id; in hclge_add_fd_entry_by_arfs()
6417 rule->queue_id = queue_id; in hclge_add_fd_entry_by_arfs()
6421 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6426 return rule->location; in hclge_add_fd_entry_by_arfs()
6429 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6431 if (rule->queue_id == queue_id) in hclge_add_fd_entry_by_arfs()
6432 return rule->location; in hclge_add_fd_entry_by_arfs()
6434 tmp_queue_id = rule->queue_id; in hclge_add_fd_entry_by_arfs()
6435 rule->queue_id = queue_id; in hclge_add_fd_entry_by_arfs()
6438 rule->queue_id = tmp_queue_id; in hclge_add_fd_entry_by_arfs()
6442 return rule->location; in hclge_add_fd_entry_by_arfs()
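Editorial sketch: hclge_add_fd_entry_by_arfs() above claims a rule location with find_first_zero_bit()/set_bit() on fd_bmap and returns -ENOSPC once stage-1 rule space is exhausted. The same bitmap allocation in portable C (demo_* names invented; the kernel's own bitmap helpers are not available in userspace):

#include <errno.h>
#include <limits.h>

#define DEMO_MAX_RULES 128
#define DEMO_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long demo_bmap[DEMO_MAX_RULES / DEMO_BITS_PER_LONG + 1];

/* Find and claim the first free rule slot, or -ENOSPC if none is left. */
static int demo_alloc_rule_slot(void)
{
	unsigned int bit;

	for (bit = 0; bit < DEMO_MAX_RULES; bit++) {
		unsigned long *word = &demo_bmap[bit / DEMO_BITS_PER_LONG];
		unsigned long mask = 1UL << (bit % DEMO_BITS_PER_LONG);

		if (!(*word & mask)) {
			*word |= mask;	/* claim the slot */
			return bit;
		}
	}
	return -ENOSPC;
}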
6448 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_rfs_filter_expire()
6453 spin_lock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
6454 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { in hclge_rfs_filter_expire()
6455 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
6458 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_rfs_filter_expire()
6459 if (rps_may_expire_flow(handle->netdev, rule->queue_id, in hclge_rfs_filter_expire()
6460 rule->flow_id, rule->location)) { in hclge_rfs_filter_expire()
6461 hlist_del_init(&rule->rule_node); in hclge_rfs_filter_expire()
6462 hlist_add_head(&rule->rule_node, &del_list); in hclge_rfs_filter_expire()
6463 hdev->hclge_fd_rule_num--; in hclge_rfs_filter_expire()
6464 clear_bit(rule->location, hdev->fd_bmap); in hclge_rfs_filter_expire()
6467 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
6471 rule->location, NULL, false); in hclge_rfs_filter_expire()
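Editorial sketch: hclge_rfs_filter_expire() above unlinks expired rules onto a local del_list while holding fd_rule_lock, then drops the lock before the slow hardware deletes and frees. A pthread rendering of that collect-under-lock, free-outside-lock pattern (demo_* names invented):

#include <pthread.h>
#include <stdlib.h>

struct demo_node { int expired; struct demo_node *next; };

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_node *demo_list;

static void demo_expire(void (*hw_del)(struct demo_node *))
{
	struct demo_node *del = NULL, **pp, *n;

	pthread_mutex_lock(&demo_lock);
	for (pp = &demo_list; (n = *pp) != NULL; ) {
		if (n->expired) {
			*pp = n->next;		/* unlink from live list */
			n->next = del;		/* park on local del list */
			del = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&demo_lock);

	while (del) {			/* slow work outside the lock */
		n = del;
		del = del->next;
		hw_del(n);
		free(n);
	}
}

Doing the expensive teardown outside the lock keeps the rule list available to the fast path while old entries are removed.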
6482 struct hclge_dev *hdev = vport->back; in hclge_clear_arfs_rules()
6484 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE) in hclge_clear_arfs_rules()
6492 struct hclge_dev *hdev = vport->back; in hclge_get_hw_reset_stat()
6494 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || in hclge_get_hw_reset_stat()
6495 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); in hclge_get_hw_reset_stat()
6501 struct hclge_dev *hdev = vport->back; in hclge_get_cmdq_stat()
6503 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_get_cmdq_stat()
6509 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_resetting()
6511 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_ae_dev_resetting()
6517 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_reset_cnt()
6519 return hdev->rst_stats.hw_reset_done_cnt; in hclge_ae_dev_reset_cnt()
6525 struct hclge_dev *hdev = vport->back; in hclge_enable_fd()
6528 hdev->fd_en = enable; in hclge_enable_fd()
6529 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; in hclge_enable_fd()
6532 spin_lock_bh(&hdev->fd_rule_lock); in hclge_enable_fd()
6534 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_enable_fd()
6563 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); in hclge_cfg_mac_mode()
6565 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_mode()
6567 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_mode()
6585 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; in hclge_config_switch_param()
6586 req->func_id = cpu_to_le32(func_id); in hclge_config_switch_param()
6588 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
6590 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
6597 req->switch_param = (req->switch_param & param_mask) | switch_param; in hclge_config_switch_param()
6598 req->param_mask = param_mask; in hclge_config_switch_param()
6600 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
6602 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
6612 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_phy_link_status_wait()
6619 dev_err(&hdev->pdev->dev, in hclge_phy_link_status_wait()
6624 if (phydev->link == link_ret) in hclge_phy_link_status_wait()
6648 return -EBUSY; in hclge_mac_link_status_wait()
6674 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
6676 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
6682 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); in hclge_set_app_loopback()
6685 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); in hclge_set_app_loopback()
6691 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
6693 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
6720 dev_err(&hdev->pdev->dev, in hclge_cfg_serdes_loopback()
6722 return -ENOTSUPP; in hclge_cfg_serdes_loopback()
6726 req->enable = loop_mode_b; in hclge_cfg_serdes_loopback()
6727 req->mask = loop_mode_b; in hclge_cfg_serdes_loopback()
6729 req->mask = loop_mode_b; in hclge_cfg_serdes_loopback()
6732 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_serdes_loopback()
6734 dev_err(&hdev->pdev->dev, in hclge_cfg_serdes_loopback()
6743 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_serdes_loopback()
6745 dev_err(&hdev->pdev->dev, in hclge_cfg_serdes_loopback()
6750 !(req->result & HCLGE_CMD_SERDES_DONE_B)); in hclge_cfg_serdes_loopback()
6752 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { in hclge_cfg_serdes_loopback()
6753 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); in hclge_cfg_serdes_loopback()
6754 return -EBUSY; in hclge_cfg_serdes_loopback()
6755 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { in hclge_cfg_serdes_loopback()
6756 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); in hclge_cfg_serdes_loopback()
6757 return -EIO; in hclge_cfg_serdes_loopback()
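Editorial sketch: the serdes loopback fragments above poll a status field until a DONE bit rises and then check a separate SUCCESS bit, mapping a timeout to -EBUSY and a firmware-reported failure to -EIO. The shape of that two-bit completion check (bit values and names invented):

#include <errno.h>
#include <stdint.h>

#define DEMO_DONE_BIT	0x1
#define DEMO_OK_BIT	0x2

static int demo_wait_result(uint32_t (*read_status)(void), int max_polls)
{
	uint32_t v = 0;

	while (max_polls-- > 0) {
		v = read_status();
		if (v & DEMO_DONE_BIT)
			break;
	}
	if (!(v & DEMO_DONE_BIT))
		return -EBUSY;	/* hardware never signalled completion */
	if (!(v & DEMO_OK_BIT))
		return -EIO;	/* completed, but firmware reported failure */
	return 0;
}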
6775 dev_err(&hdev->pdev->dev, in hclge_set_serdes_loopback()
6786 if (!phydev->suspended) { in hclge_enable_phy_loopback()
6813 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_phy_loopback()
6817 return -ENOTSUPP; in hclge_set_phy_loopback()
6824 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
6833 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
6848 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); in hclge_tqp_enable()
6849 req->stream_id = cpu_to_le16(stream_id); in hclge_tqp_enable()
6851 req->enable |= 1U << HCLGE_TQP_ENABLE_B; in hclge_tqp_enable()
6853 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_tqp_enable()
6855 dev_err(&hdev->pdev->dev, in hclge_tqp_enable()
6865 struct hclge_dev *hdev = vport->back; in hclge_set_loopback()
6873 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_set_loopback()
6894 ret = -ENOTSUPP; in hclge_set_loopback()
6895 dev_err(&hdev->pdev->dev, in hclge_set_loopback()
6903 kinfo = &vport->nic.kinfo; in hclge_set_loopback()
6904 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_set_loopback()
6937 kinfo = &vport->nic.kinfo; in hclge_reset_tqp_stats()
6938 for (i = 0; i < kinfo->num_tqps; i++) { in hclge_reset_tqp_stats()
6939 queue = handle->kinfo.tqp[i]; in hclge_reset_tqp_stats()
6941 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); in hclge_reset_tqp_stats()
6949 unsigned long last = hdev->serv_processed_cnt; in hclge_flush_link_update()
6952 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && in hclge_flush_link_update()
6954 last == hdev->serv_processed_cnt) in hclge_flush_link_update()
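Editorial sketch: hclge_flush_link_update() above waits until the LINK_UPDATING bit clears, a fixed budget expires, or serv_processed_cnt shows the service task has made progress. A C11-atomics rendering of that bounded wait (demo names and the budget constant invented):

#include <stdatomic.h>
#include <stdbool.h>

/* Wait until the updater finishes, the budget expires, or progress is seen. */
static void demo_flush(atomic_bool *updating, atomic_ulong *processed)
{
	unsigned long last = atomic_load(processed);
	int budget = 100000;	/* invented cap, stands in for a timeout */

	while (atomic_load(updating) && budget-- > 0 &&
	       atomic_load(processed) == last)
		;	/* the driver sleeps briefly on each iteration */
}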
6961 struct hclge_dev *hdev = vport->back; in hclge_set_timer_task()
6967 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_set_timer_task()
6978 struct hclge_dev *hdev = vport->back; in hclge_ae_start()
6982 clear_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_start()
6983 hdev->hw.mac.link = 0; in hclge_ae_start()
6996 struct hclge_dev *hdev = vport->back; in hclge_ae_stop()
6999 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_stop()
7000 spin_lock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
7002 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
7007 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && in hclge_ae_stop()
7008 hdev->reset_type != HNAE3_FUNC_RESET) { in hclge_ae_stop()
7014 for (i = 0; i < handle->kinfo.num_tqps; i++) in hclge_ae_stop()
7031 struct hclge_dev *hdev = vport->back; in hclge_vport_start()
7033 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_vport_start()
7034 vport->last_active_jiffies = jiffies; in hclge_vport_start()
7036 if (test_bit(vport->vport_id, hdev->vport_config_block)) { in hclge_vport_start()
7037 if (vport->vport_id) { in hclge_vport_start()
7045 clear_bit(vport->vport_id, hdev->vport_config_block); in hclge_vport_start()
7052 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); in hclge_vport_stop()
7073 struct hclge_dev *hdev = vport->back; in hclge_get_mac_vlan_cmd_status()
7076 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
7079 return -EIO; in hclge_get_mac_vlan_cmd_status()
7087 return -ENOSPC; in hclge_get_mac_vlan_cmd_status()
7089 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
7092 return -EIO; in hclge_get_mac_vlan_cmd_status()
7097 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
7099 return -ENOENT; in hclge_get_mac_vlan_cmd_status()
7102 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
7105 return -EIO; in hclge_get_mac_vlan_cmd_status()
7110 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
7112 return -ENOENT; in hclge_get_mac_vlan_cmd_status()
7115 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
7118 return -EIO; in hclge_get_mac_vlan_cmd_status()
7121 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
7124 return -EINVAL; in hclge_get_mac_vlan_cmd_status()
7135 return -EIO; in hclge_update_desc_vfid()
7145 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; in hclge_update_desc_vfid()
7178 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); in hclge_prepare_mac_addr()
7180 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); in hclge_prepare_mac_addr()
7181 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); in hclge_prepare_mac_addr()
7184 new_req->mac_addr_hi32 = cpu_to_le32(high_val); in hclge_prepare_mac_addr()
7185 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); in hclge_prepare_mac_addr()
7191 struct hclge_dev *hdev = vport->back; in hclge_remove_mac_vlan_tbl()
7201 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_remove_mac_vlan_tbl()
7203 dev_err(&hdev->pdev->dev, in hclge_remove_mac_vlan_tbl()
7220 struct hclge_dev *hdev = vport->back; in hclge_lookup_mac_vlan_tbl()
7238 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_lookup_mac_vlan_tbl()
7243 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_lookup_mac_vlan_tbl()
7246 dev_err(&hdev->pdev->dev, in hclge_lookup_mac_vlan_tbl()
7262 struct hclge_dev *hdev = vport->back; in hclge_add_mac_vlan_tbl()
7276 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mac_vlan_tbl()
7292 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); in hclge_add_mac_vlan_tbl()
7302 dev_err(&hdev->pdev->dev, in hclge_add_mac_vlan_tbl()
7321 req->space_size = cpu_to_le32(space_size); in hclge_set_umv_space()
7323 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_umv_space()
7325 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", in hclge_set_umv_space()
7340 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); in hclge_init_umv_space()
7344 if (allocated_size < hdev->wanted_umv_size) in hclge_init_umv_space()
7345 dev_warn(&hdev->pdev->dev, in hclge_init_umv_space()
7347 hdev->wanted_umv_size, allocated_size); in hclge_init_umv_space()
7349 hdev->max_umv_size = allocated_size; in hclge_init_umv_space()
7350 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
7351 hdev->share_umv_size = hdev->priv_umv_size + in hclge_init_umv_space()
7352 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
7362 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_umv_space()
7363 vport = &hdev->vport[i]; in hclge_reset_umv_space()
7364 vport->used_umv_num = 0; in hclge_reset_umv_space()
7367 mutex_lock(&hdev->vport_lock); in hclge_reset_umv_space()
7368 hdev->share_umv_size = hdev->priv_umv_size + in hclge_reset_umv_space()
7369 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_reset_umv_space()
7370 mutex_unlock(&hdev->vport_lock); in hclge_reset_umv_space()
7375 struct hclge_dev *hdev = vport->back; in hclge_is_umv_space_full()
7379 mutex_lock(&hdev->vport_lock); in hclge_is_umv_space_full()
7381 is_full = (vport->used_umv_num >= hdev->priv_umv_size && in hclge_is_umv_space_full()
7382 hdev->share_umv_size == 0); in hclge_is_umv_space_full()
7385 mutex_unlock(&hdev->vport_lock); in hclge_is_umv_space_full()
7392 struct hclge_dev *hdev = vport->back; in hclge_update_umv_space()
7395 if (vport->used_umv_num > hdev->priv_umv_size) in hclge_update_umv_space()
7396 hdev->share_umv_size++; in hclge_update_umv_space()
7398 if (vport->used_umv_num > 0) in hclge_update_umv_space()
7399 vport->used_umv_num--; in hclge_update_umv_space()
7401 if (vport->used_umv_num >= hdev->priv_umv_size && in hclge_update_umv_space()
7402 hdev->share_umv_size > 0) in hclge_update_umv_space()
7403 hdev->share_umv_size--; in hclge_update_umv_space()
7404 vport->used_umv_num++; in hclge_update_umv_space()
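Editorial sketch: the UMV fragments above (hclge_is_umv_space_full()/hclge_update_umv_space()) give each vport a private quota of unicast MAC entries plus one shared pool: private slots are consumed first, extra entries borrow from the pool, and frees return borrowed slots before private ones. The accounting reduced to plain C (demo_* names invented):

struct demo_umv {
	unsigned int priv_size;   /* per-vport private quota */
	unsigned int share_size;  /* remaining shared pool */
	unsigned int used;        /* this vport's used entries */
};

static int demo_umv_take(struct demo_umv *u)
{
	if (u->used < u->priv_size) {
		u->used++;			/* private slot */
		return 0;
	}
	if (u->share_size > 0) {
		u->share_size--;		/* borrow from shared pool */
		u->used++;
		return 0;
	}
	return -1;	/* both private quota and shared pool exhausted */
}

static void demo_umv_put(struct demo_umv *u)
{
	if (u->used == 0)
		return;
	if (u->used > u->priv_size)
		u->share_size++;		/* return borrowed slot first */
	u->used--;
}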
7414 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) in hclge_find_mac_node()
7426 if (mac_node->state == HCLGE_MAC_TO_DEL) in hclge_update_mac_node()
7427 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node()
7431 if (mac_node->state == HCLGE_MAC_TO_ADD) { in hclge_update_mac_node()
7432 list_del(&mac_node->node); in hclge_update_mac_node()
7435 mac_node->state = HCLGE_MAC_TO_DEL; in hclge_update_mac_node()
7438 /* only from tmp_add_list, the mac_node->state won't be in hclge_update_mac_node()
7442 if (mac_node->state == HCLGE_MAC_TO_ADD) in hclge_update_mac_node()
7443 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node()
7454 struct hclge_dev *hdev = vport->back; in hclge_update_mac_list()
7459 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_update_mac_list()
7461 spin_lock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
7470 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
7471 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_list()
7477 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
7478 dev_err(&hdev->pdev->dev, in hclge_update_mac_list()
7481 return -ENOENT; in hclge_update_mac_list()
7486 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
7487 return -ENOMEM; in hclge_update_mac_list()
7490 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_list()
7492 mac_node->state = state; in hclge_update_mac_list()
7493 ether_addr_copy(mac_node->mac_addr, addr); in hclge_update_mac_list()
7494 list_add_tail(&mac_node->node, list); in hclge_update_mac_list()
7496 spin_unlock_bh(&vport->mac_list_lock); in hclge_update_mac_list()
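Editorial sketch: hclge_update_mac_node()/hclge_update_mac_list() above keep each MAC address in one of three states (TO_ADD, ACTIVE, TO_DEL) and fold a new request into the current state instead of touching hardware immediately; a TO_ADD node deleted before it was ever synced is simply dropped. The transition rules as a small pure function (demo_* names invented):

enum demo_mac_state { DEMO_TO_ADD, DEMO_ACTIVE, DEMO_TO_DEL };
enum demo_mac_op { DEMO_OP_ADD, DEMO_OP_DEL };

/* Returns the new state, or -1 when the node should simply be removed. */
static int demo_mac_transition(enum demo_mac_state st, enum demo_mac_op op)
{
	if (op == DEMO_OP_ADD)	/* re-adding cancels a pending delete */
		return st == DEMO_TO_DEL ? DEMO_ACTIVE : st;
	/* delete: a never-synced TO_ADD node can be dropped outright */
	if (st == DEMO_TO_ADD)
		return -1;
	return DEMO_TO_DEL;
}

In the driver, the -1 case corresponds to list_del() plus kfree() of the node; the hardware table is only reconciled later by the sync path.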
7513 struct hclge_dev *hdev = vport->back; in hclge_add_uc_addr_common()
7523 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
7528 return -EINVAL; in hclge_add_uc_addr_common()
7534 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); in hclge_add_uc_addr_common()
7545 if (ret == -ENOENT) { in hclge_add_uc_addr_common()
7546 mutex_lock(&hdev->vport_lock); in hclge_add_uc_addr_common()
7551 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
7554 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
7556 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) in hclge_add_uc_addr_common()
7557 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", in hclge_add_uc_addr_common()
7558 hdev->priv_umv_size); in hclge_add_uc_addr_common()
7560 return -ENOSPC; in hclge_add_uc_addr_common()
7565 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n", in hclge_add_uc_addr_common()
7566 vport->vport_id, addr); in hclge_add_uc_addr_common()
7570 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
7589 struct hclge_dev *hdev = vport->back; in hclge_rm_uc_addr_common()
7597 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n", in hclge_rm_uc_addr_common()
7599 return -EINVAL; in hclge_rm_uc_addr_common()
7607 mutex_lock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
7609 mutex_unlock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
7610 } else if (ret == -ENOENT) { in hclge_rm_uc_addr_common()
7629 struct hclge_dev *hdev = vport->back; in hclge_add_mc_addr_common()
7636 dev_err(&hdev->pdev->dev, in hclge_add_mc_addr_common()
7639 return -EINVAL; in hclge_add_mc_addr_common()
7650 status = hclge_update_desc_vfid(desc, vport->vport_id, false); in hclge_add_mc_addr_common()
7656 if (status == -ENOSPC && in hclge_add_mc_addr_common()
7657 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) in hclge_add_mc_addr_common()
7658 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); in hclge_add_mc_addr_common()
7675 struct hclge_dev *hdev = vport->back; in hclge_rm_mc_addr_common()
7682 dev_dbg(&hdev->pdev->dev, in hclge_rm_mc_addr_common()
7685 return -EINVAL; in hclge_rm_mc_addr_common()
7693 status = hclge_update_desc_vfid(desc, vport->vport_id, true); in hclge_rm_mc_addr_common()
7704 } else if (status == -ENOENT) { in hclge_rm_mc_addr_common()
7720 ret = sync(vport, mac_node->mac_addr); in hclge_sync_vport_mac_list()
7722 mac_node->state = HCLGE_MAC_ACTIVE; in hclge_sync_vport_mac_list()
7725 &vport->state); in hclge_sync_vport_mac_list()
7740 ret = unsync(vport, mac_node->mac_addr); in hclge_unsync_vport_mac_list()
7741 if (!ret || ret == -ENOENT) { in hclge_unsync_vport_mac_list()
7742 list_del(&mac_node->node); in hclge_unsync_vport_mac_list()
7746 &vport->state); in hclge_unsync_vport_mac_list()
7759 if (mac_node->state == HCLGE_MAC_TO_ADD) in hclge_sync_from_add_list()
7770 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); in hclge_sync_from_add_list()
7772 hclge_update_mac_node(new_node, mac_node->state); in hclge_sync_from_add_list()
7773 list_del(&mac_node->node); in hclge_sync_from_add_list()
7775 } else if (mac_node->state == HCLGE_MAC_ACTIVE) { in hclge_sync_from_add_list()
7776 mac_node->state = HCLGE_MAC_TO_DEL; in hclge_sync_from_add_list()
7777 list_del(&mac_node->node); in hclge_sync_from_add_list()
7778 list_add_tail(&mac_node->node, mac_list); in hclge_sync_from_add_list()
7780 list_del(&mac_node->node); in hclge_sync_from_add_list()
7794 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); in hclge_sync_from_del_list()
7803 new_node->state = HCLGE_MAC_ACTIVE; in hclge_sync_from_del_list()
7804 list_del(&mac_node->node); in hclge_sync_from_del_list()
7807 list_del(&mac_node->node); in hclge_sync_from_del_list()
7808 list_add_tail(&mac_node->node, mac_list); in hclge_sync_from_del_list()
7819 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; in hclge_update_overflow_flags()
7821 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; in hclge_update_overflow_flags()
7824 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; in hclge_update_overflow_flags()
7826 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; in hclge_update_overflow_flags()
7845 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_sync_vport_mac_table()
7847 spin_lock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
7850 switch (mac_node->state) { in hclge_sync_vport_mac_table()
7852 list_del(&mac_node->node); in hclge_sync_vport_mac_table()
7853 list_add_tail(&mac_node->node, &tmp_del_list); in hclge_sync_vport_mac_table()
7859 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); in hclge_sync_vport_mac_table()
7860 new_node->state = mac_node->state; in hclge_sync_vport_mac_table()
7861 list_add_tail(&new_node->node, &tmp_add_list); in hclge_sync_vport_mac_table()
7869 spin_unlock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
7887 spin_lock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
7892 spin_unlock_bh(&vport->mac_list_lock); in hclge_sync_vport_mac_table()
7899 struct hclge_dev *hdev = vport->back; in hclge_need_sync_mac_table()
7901 if (test_bit(vport->vport_id, hdev->vport_config_block)) in hclge_need_sync_mac_table()
7904 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) in hclge_need_sync_mac_table()
7914 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_mac_table()
7915 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_mac_table()
7930 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_mac_table()
7935 list = &vport->uc_mac_list; in hclge_rm_vport_all_mac_table()
7938 list = &vport->mc_mac_list; in hclge_rm_vport_all_mac_table()
7945 set_bit(vport->vport_id, hdev->vport_config_block); in hclge_rm_vport_all_mac_table()
7947 spin_lock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
7950 switch (mac_cfg->state) { in hclge_rm_vport_all_mac_table()
7953 list_del(&mac_cfg->node); in hclge_rm_vport_all_mac_table()
7954 list_add_tail(&mac_cfg->node, &tmp_del_list); in hclge_rm_vport_all_mac_table()
7958 list_del(&mac_cfg->node); in hclge_rm_vport_all_mac_table()
7965 spin_unlock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
7968 ret = unsync(vport, mac_cfg->mac_addr); in hclge_rm_vport_all_mac_table()
7969 if (!ret || ret == -ENOENT) { in hclge_rm_vport_all_mac_table()
7975 mac_cfg->state == HCLGE_MAC_ACTIVE) { in hclge_rm_vport_all_mac_table()
7976 mac_cfg->state = HCLGE_MAC_TO_ADD; in hclge_rm_vport_all_mac_table()
7978 list_del(&mac_cfg->node); in hclge_rm_vport_all_mac_table()
7982 mac_cfg->state = HCLGE_MAC_TO_DEL; in hclge_rm_vport_all_mac_table()
7986 spin_lock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
7990 spin_unlock_bh(&vport->mac_list_lock); in hclge_rm_vport_all_mac_table()
7998 struct hclge_dev *hdev = vport->back; in hclge_uninit_vport_mac_list()
8004 &vport->uc_mac_list : &vport->mc_mac_list; in hclge_uninit_vport_mac_list()
8006 spin_lock_bh(&vport->mac_list_lock); in hclge_uninit_vport_mac_list()
8009 switch (mac_node->state) { in hclge_uninit_vport_mac_list()
8012 list_del(&mac_node->node); in hclge_uninit_vport_mac_list()
8013 list_add_tail(&mac_node->node, &tmp_del_list); in hclge_uninit_vport_mac_list()
8016 list_del(&mac_node->node); in hclge_uninit_vport_mac_list()
8022 spin_unlock_bh(&vport->mac_list_lock); in hclge_uninit_vport_mac_list()
8032 dev_warn(&hdev->pdev->dev, in hclge_uninit_vport_mac_list()
8035 vport->vport_id); in hclge_uninit_vport_mac_list()
8038 list_del(&mac_node->node); in hclge_uninit_vport_mac_list()
8048 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_mac_table()
8049 vport = &hdev->vport[i]; in hclge_uninit_mac_table()
8066 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
8069 return -EIO; in hclge_get_mac_ethertype_cmd_status()
8078 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
8080 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
8083 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
8085 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
8088 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
8091 return_status = -EIO; in hclge_get_mac_ethertype_cmd_status()
8101 struct hclge_dev *hdev = vport->back; in hclge_check_vf_mac_exist()
8111 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); in hclge_check_vf_mac_exist()
8115 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT) in hclge_check_vf_mac_exist()
8119 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) in hclge_check_vf_mac_exist()
8121 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac)) in hclge_check_vf_mac_exist()
8131 struct hclge_dev *hdev = vport->back; in hclge_set_vf_mac()
8135 return -EINVAL; in hclge_set_vf_mac()
8137 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { in hclge_set_vf_mac()
8138 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
8145 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n", in hclge_set_vf_mac()
8147 return -EEXIST; in hclge_set_vf_mac()
8150 ether_addr_copy(vport->vf_info.mac, mac_addr); in hclge_set_vf_mac()
8152 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { in hclge_set_vf_mac()
8153 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
8159 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n", in hclge_set_vf_mac()
8175 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mgr_tbl()
8177 dev_err(&hdev->pdev->dev, in hclge_add_mgr_tbl()
8197 dev_err(&hdev->pdev->dev, in init_mgr_tbl()
8210 struct hclge_dev *hdev = vport->back; in hclge_get_mac_addr()
8212 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclge_get_mac_addr()
8218 struct list_head *list = &vport->uc_mac_list; in hclge_update_mac_node_for_dev_addr()
8225 return -ENOMEM; in hclge_update_mac_node_for_dev_addr()
8227 new_node->state = HCLGE_MAC_TO_ADD; in hclge_update_mac_node_for_dev_addr()
8228 ether_addr_copy(new_node->mac_addr, new_addr); in hclge_update_mac_node_for_dev_addr()
8229 list_add(&new_node->node, list); in hclge_update_mac_node_for_dev_addr()
8231 if (new_node->state == HCLGE_MAC_TO_DEL) in hclge_update_mac_node_for_dev_addr()
8232 new_node->state = HCLGE_MAC_ACTIVE; in hclge_update_mac_node_for_dev_addr()
8235 * addr may not be re-added into mac table for the umv space in hclge_update_mac_node_for_dev_addr()
8239 list_move(&new_node->node, list); in hclge_update_mac_node_for_dev_addr()
8245 if (old_node->state == HCLGE_MAC_TO_ADD) { in hclge_update_mac_node_for_dev_addr()
8246 list_del(&old_node->node); in hclge_update_mac_node_for_dev_addr()
8249 old_node->state = HCLGE_MAC_TO_DEL; in hclge_update_mac_node_for_dev_addr()
8254 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_update_mac_node_for_dev_addr()
8264 struct hclge_dev *hdev = vport->back; in hclge_set_mac_addr()
8272 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
8275 return -EINVAL; in hclge_set_mac_addr()
8280 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
8287 old_addr = hdev->hw.mac.mac_addr; in hclge_set_mac_addr()
8289 spin_lock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
8292 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
8295 spin_unlock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
8305 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); in hclge_set_mac_addr()
8306 spin_unlock_bh(&vport->mac_list_lock); in hclge_set_mac_addr()
8317 struct hclge_dev *hdev = vport->back; in hclge_do_ioctl()
8319 if (!hdev->hw.mac.phydev) in hclge_do_ioctl()
8320 return -EOPNOTSUPP; in hclge_do_ioctl()
8322 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); in hclge_do_ioctl()
8335 req->vlan_type = vlan_type; in hclge_set_vlan_filter_ctrl()
8336 req->vf_id = vf_id; in hclge_set_vlan_filter_ctrl()
8338 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
8340 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_ctrl()
8347 req->vlan_fe = filter_en ? in hclge_set_vlan_filter_ctrl()
8348 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); in hclge_set_vlan_filter_ctrl()
8350 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
8352 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
8373 struct hclge_dev *hdev = vport->back; in hclge_enable_vlan_filter()
8375 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_enable_vlan_filter()
8386 handle->netdev_flags |= HNAE3_VLAN_FLTR; in hclge_enable_vlan_filter()
8388 handle->netdev_flags &= ~HNAE3_VLAN_FLTR; in hclge_enable_vlan_filter()
8395 struct hclge_vport *vport = &hdev->vport[vfid]; in hclge_set_vf_vlan_common()
8408 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { in hclge_set_vf_vlan_common()
8409 if (vport->vf_info.spoofchk && vlan) { in hclge_set_vf_vlan_common()
8410 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
8412 return -EPERM; in hclge_set_vf_vlan_common()
8430 req0->vlan_id = cpu_to_le16(vlan); in hclge_set_vf_vlan_common()
8431 req0->vlan_cfg = is_kill; in hclge_set_vf_vlan_common()
8434 req0->vf_bitmap[vf_byte_off] = vf_byte_val; in hclge_set_vf_vlan_common()
8436 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; in hclge_set_vf_vlan_common()
8438 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_set_vf_vlan_common()
8440 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
8448 if (!req0->resp_code || req0->resp_code == 1) in hclge_set_vf_vlan_common()
8451 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { in hclge_set_vf_vlan_common()
8452 set_bit(vfid, hdev->vf_vlan_full); in hclge_set_vf_vlan_common()
8453 dev_warn(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
8458 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
8460 req0->resp_code); in hclge_set_vf_vlan_common()
8463 if (!req0->resp_code) in hclge_set_vf_vlan_common()
8471 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) in hclge_set_vf_vlan_common()
8474 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
8476 req0->resp_code); in hclge_set_vf_vlan_common()
8479 return -EIO; in hclge_set_vf_vlan_common()
8500 req->vlan_offset = vlan_offset_160; in hclge_set_port_vlan_filter()
8501 req->vlan_cfg = is_kill; in hclge_set_port_vlan_filter()
8502 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; in hclge_set_port_vlan_filter()
8504 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter()
8506 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter()
8524 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
8532 test_bit(vport_id, hdev->vlan_table[vlan_id])) in hclge_set_vlan_filter_hw()
8535 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_set_vlan_filter_hw()
8536 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
8539 return -EINVAL; in hclge_set_vlan_filter_hw()
8543 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_set_vlan_filter_hw()
8544 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
8547 return -EINVAL; in hclge_set_vlan_filter_hw()
8550 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) in hclge_set_vlan_filter_hw()
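Editorial sketch: hclge_set_vlan_filter_hw() above tracks, per VLAN ID, which vports use it (hdev->vlan_table[vlan_id]) so the port-level filter entry is programmed only for the first user and torn down only when the last user leaves. The bitmap-as-refcount idea (demo_* names invented; vport ids assumed < 64 here):

#include <stdbool.h>

struct demo_vlan { unsigned long long vports; /* one bit per vport */ };

/* Returns true when the port-level entry must be programmed/removed. */
static bool demo_vlan_add(struct demo_vlan *v, unsigned int vport)
{
	bool first = (v->vports == 0);

	v->vports |= 1ULL << vport;
	return first;		/* first user: program port filter */
}

static bool demo_vlan_del(struct demo_vlan *v, unsigned int vport)
{
	v->vports &= ~(1ULL << vport);
	return v->vports == 0;	/* last user: remove port filter */
}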
8562 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; in hclge_set_vlan_tx_offload_cfg()
8564 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_tx_offload_cfg()
8572 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); in hclge_set_vlan_tx_offload_cfg()
8573 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); in hclge_set_vlan_tx_offload_cfg()
8574 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, in hclge_set_vlan_tx_offload_cfg()
8575 vcfg->accept_tag1 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
8576 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, in hclge_set_vlan_tx_offload_cfg()
8577 vcfg->accept_untag1 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
8578 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, in hclge_set_vlan_tx_offload_cfg()
8579 vcfg->accept_tag2 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
8580 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, in hclge_set_vlan_tx_offload_cfg()
8581 vcfg->accept_untag2 ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
8582 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, in hclge_set_vlan_tx_offload_cfg()
8583 vcfg->insert_tag1_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
8584 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, in hclge_set_vlan_tx_offload_cfg()
8585 vcfg->insert_tag2_en ? 1 : 0); in hclge_set_vlan_tx_offload_cfg()
8586 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); in hclge_set_vlan_tx_offload_cfg()
8588 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; in hclge_set_vlan_tx_offload_cfg()
8589 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / in hclge_set_vlan_tx_offload_cfg()
8591 req->vf_bitmap[bmap_index] = in hclge_set_vlan_tx_offload_cfg()
8592 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); in hclge_set_vlan_tx_offload_cfg()
8594 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_tx_offload_cfg()
8596 dev_err(&hdev->pdev->dev, in hclge_set_vlan_tx_offload_cfg()
8605 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; in hclge_set_vlan_rx_offload_cfg()
8607 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_rx_offload_cfg()
8615 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
8616 vcfg->strip_tag1_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
8617 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
8618 vcfg->strip_tag2_en ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
8619 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, in hclge_set_vlan_rx_offload_cfg()
8620 vcfg->vlan1_vlan_prionly ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
8621 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, in hclge_set_vlan_rx_offload_cfg()
8622 vcfg->vlan2_vlan_prionly ? 1 : 0); in hclge_set_vlan_rx_offload_cfg()
8624 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; in hclge_set_vlan_rx_offload_cfg()
8625 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / in hclge_set_vlan_rx_offload_cfg()
8627 req->vf_bitmap[bmap_index] = in hclge_set_vlan_rx_offload_cfg()
8628 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); in hclge_set_vlan_rx_offload_cfg()
8630 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_rx_offload_cfg()
8632 dev_err(&hdev->pdev->dev, in hclge_set_vlan_rx_offload_cfg()
8646 vport->txvlan_cfg.accept_tag1 = true; in hclge_vlan_offload_cfg()
8647 vport->txvlan_cfg.insert_tag1_en = false; in hclge_vlan_offload_cfg()
8648 vport->txvlan_cfg.default_tag1 = 0; in hclge_vlan_offload_cfg()
8650 vport->txvlan_cfg.accept_tag1 = false; in hclge_vlan_offload_cfg()
8651 vport->txvlan_cfg.insert_tag1_en = true; in hclge_vlan_offload_cfg()
8652 vport->txvlan_cfg.default_tag1 = vlan_tag; in hclge_vlan_offload_cfg()
8655 vport->txvlan_cfg.accept_untag1 = true; in hclge_vlan_offload_cfg()
8661 vport->txvlan_cfg.accept_tag2 = true; in hclge_vlan_offload_cfg()
8662 vport->txvlan_cfg.accept_untag2 = true; in hclge_vlan_offload_cfg()
8663 vport->txvlan_cfg.insert_tag2_en = false; in hclge_vlan_offload_cfg()
8664 vport->txvlan_cfg.default_tag2 = 0; in hclge_vlan_offload_cfg()
8667 vport->rxvlan_cfg.strip_tag1_en = false; in hclge_vlan_offload_cfg()
8668 vport->rxvlan_cfg.strip_tag2_en = in hclge_vlan_offload_cfg()
8669 vport->rxvlan_cfg.rx_vlan_offload_en; in hclge_vlan_offload_cfg()
8671 vport->rxvlan_cfg.strip_tag1_en = in hclge_vlan_offload_cfg()
8672 vport->rxvlan_cfg.rx_vlan_offload_en; in hclge_vlan_offload_cfg()
8673 vport->rxvlan_cfg.strip_tag2_en = true; in hclge_vlan_offload_cfg()
8675 vport->rxvlan_cfg.vlan1_vlan_prionly = false; in hclge_vlan_offload_cfg()
8676 vport->rxvlan_cfg.vlan2_vlan_prionly = false; in hclge_vlan_offload_cfg()
8694 rx_req->ot_fst_vlan_type = in hclge_set_vlan_protocol_type()
8695 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); in hclge_set_vlan_protocol_type()
8696 rx_req->ot_sec_vlan_type = in hclge_set_vlan_protocol_type()
8697 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); in hclge_set_vlan_protocol_type()
8698 rx_req->in_fst_vlan_type = in hclge_set_vlan_protocol_type()
8699 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); in hclge_set_vlan_protocol_type()
8700 rx_req->in_sec_vlan_type = in hclge_set_vlan_protocol_type()
8701 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); in hclge_set_vlan_protocol_type()
8703 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
8705 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
8714 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); in hclge_set_vlan_protocol_type()
8715 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); in hclge_set_vlan_protocol_type()
8717 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
8719 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
8730 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_init_vlan_config()
8735 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_init_vlan_config()
8737 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_config()
8738 vport = &hdev->vport[i]; in hclge_init_vlan_config()
8743 vport->vport_id); in hclge_init_vlan_config()
8761 handle->netdev_flags |= HNAE3_VLAN_FLTR; in hclge_init_vlan_config()
8763 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
8764 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
8765 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
8766 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
8767 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
8768 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
8774 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_config()
8777 vport = &hdev->vport[i]; in hclge_init_vlan_config()
8778 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; in hclge_init_vlan_config()
8781 vport->port_base_vlan_cfg.state, in hclge_init_vlan_config()
8799 vlan->hd_tbl_status = writen_to_tbl; in hclge_add_vport_vlan_table()
8800 vlan->vlan_id = vlan_id; in hclge_add_vport_vlan_table()
8802 list_add_tail(&vlan->node, &vport->vlan_list); in hclge_add_vport_vlan_table()
8808 struct hclge_dev *hdev = vport->back; in hclge_add_vport_all_vlan_table()
8811 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_add_vport_all_vlan_table()
8812 if (!vlan->hd_tbl_status) { in hclge_add_vport_all_vlan_table()
8814 vport->vport_id, in hclge_add_vport_all_vlan_table()
8815 vlan->vlan_id, false); in hclge_add_vport_all_vlan_table()
8817 dev_err(&hdev->pdev->dev, in hclge_add_vport_all_vlan_table()
8823 vlan->hd_tbl_status = true; in hclge_add_vport_all_vlan_table()
8833 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_vlan_table()
8835 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_rm_vport_vlan_table()
8836 if (vlan->vlan_id == vlan_id) { in hclge_rm_vport_vlan_table()
8837 if (is_write_tbl && vlan->hd_tbl_status) in hclge_rm_vport_vlan_table()
8840 vport->vport_id, in hclge_rm_vport_vlan_table()
8844 list_del(&vlan->node); in hclge_rm_vport_vlan_table()
8854 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_vlan_table()
8856 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_rm_vport_all_vlan_table()
8857 if (vlan->hd_tbl_status) in hclge_rm_vport_all_vlan_table()
8860 vport->vport_id, in hclge_rm_vport_all_vlan_table()
8861 vlan->vlan_id, in hclge_rm_vport_all_vlan_table()
8864 vlan->hd_tbl_status = false; in hclge_rm_vport_all_vlan_table()
8866 list_del(&vlan->node); in hclge_rm_vport_all_vlan_table()
8870 clear_bit(vport->vport_id, hdev->vf_vlan_full); in hclge_rm_vport_all_vlan_table()
8879 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_vlan_table()
8880 vport = &hdev->vport[i]; in hclge_uninit_vport_vlan_table()
8881 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_uninit_vport_vlan_table()
8882 list_del(&vlan->node); in hclge_uninit_vport_vlan_table()
8891 struct hclge_dev *hdev = vport->back; in hclge_restore_vport_vlan_table()
8897 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; in hclge_restore_vport_vlan_table()
8898 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; in hclge_restore_vport_vlan_table()
8899 state = vport->port_base_vlan_cfg.state; in hclge_restore_vport_vlan_table()
8902 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); in hclge_restore_vport_vlan_table()
8904 vport->vport_id, vlan_id, in hclge_restore_vport_vlan_table()
8909 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { in hclge_restore_vport_vlan_table()
8911 vport->vport_id, in hclge_restore_vport_vlan_table()
8912 vlan->vlan_id, false); in hclge_restore_vport_vlan_table()
8915 vlan->hd_tbl_status = true; in hclge_restore_vport_vlan_table()
8930 if (mac_node->state == HCLGE_MAC_ACTIVE) { in hclge_mac_node_convert_for_reset()
8931 mac_node->state = HCLGE_MAC_TO_ADD; in hclge_mac_node_convert_for_reset()
8932 } else if (mac_node->state == HCLGE_MAC_TO_DEL) { in hclge_mac_node_convert_for_reset()
8933 list_del(&mac_node->node); in hclge_mac_node_convert_for_reset()
8941 spin_lock_bh(&vport->mac_list_lock); in hclge_restore_mac_table_common()
8943 hclge_mac_node_convert_for_reset(&vport->uc_mac_list); in hclge_restore_mac_table_common()
8944 hclge_mac_node_convert_for_reset(&vport->mc_mac_list); in hclge_restore_mac_table_common()
8945 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); in hclge_restore_mac_table_common()
8947 spin_unlock_bh(&vport->mac_list_lock); in hclge_restore_mac_table_common()
8952 struct hclge_vport *vport = &hdev->vport[0]; in hclge_restore_hw_table()
8953 struct hnae3_handle *handle = &vport->nic; in hclge_restore_hw_table()
8957 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); in hclge_restore_hw_table()
8966 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_en_hw_strip_rxvtag()
8967 vport->rxvlan_cfg.strip_tag1_en = false; in hclge_en_hw_strip_rxvtag()
8968 vport->rxvlan_cfg.strip_tag2_en = enable; in hclge_en_hw_strip_rxvtag()
8970 vport->rxvlan_cfg.strip_tag1_en = enable; in hclge_en_hw_strip_rxvtag()
8971 vport->rxvlan_cfg.strip_tag2_en = true; in hclge_en_hw_strip_rxvtag()
8973 vport->rxvlan_cfg.vlan1_vlan_prionly = false; in hclge_en_hw_strip_rxvtag()
8974 vport->rxvlan_cfg.vlan2_vlan_prionly = false; in hclge_en_hw_strip_rxvtag()
8975 vport->rxvlan_cfg.rx_vlan_offload_en = enable; in hclge_en_hw_strip_rxvtag()
8985 struct hclge_dev *hdev = vport->back; in hclge_update_vlan_filter_entries()
8991 htons(new_info->vlan_proto), in hclge_update_vlan_filter_entries()
8992 vport->vport_id, in hclge_update_vlan_filter_entries()
8993 new_info->vlan_tag, in hclge_update_vlan_filter_entries()
8997 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), in hclge_update_vlan_filter_entries()
8998 vport->vport_id, old_info->vlan_tag, in hclge_update_vlan_filter_entries()
9009 struct hnae3_handle *nic = &vport->nic; in hclge_update_port_base_vlan_cfg()
9011 struct hclge_dev *hdev = vport->back; in hclge_update_port_base_vlan_cfg()
9014 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; in hclge_update_port_base_vlan_cfg()
9016 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag); in hclge_update_port_base_vlan_cfg()
9023 htons(vlan_info->vlan_proto), in hclge_update_port_base_vlan_cfg()
9024 vport->vport_id, in hclge_update_port_base_vlan_cfg()
9025 vlan_info->vlan_tag, in hclge_update_port_base_vlan_cfg()
9032 htons(old_vlan_info->vlan_proto), in hclge_update_port_base_vlan_cfg()
9033 vport->vport_id, in hclge_update_port_base_vlan_cfg()
9034 old_vlan_info->vlan_tag, in hclge_update_port_base_vlan_cfg()
9047 /* update state only when disabling/enabling port-based VLAN */ in hclge_update_port_base_vlan_cfg()
9048 vport->port_base_vlan_cfg.state = state; in hclge_update_port_base_vlan_cfg()
9050 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; in hclge_update_port_base_vlan_cfg()
9052 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; in hclge_update_port_base_vlan_cfg()
9055 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag; in hclge_update_port_base_vlan_cfg()
9056 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos; in hclge_update_port_base_vlan_cfg()
9057 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto; in hclge_update_port_base_vlan_cfg()
9074 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan) in hclge_get_port_base_vlan_state()
9085 struct hclge_dev *hdev = vport->back; in hclge_set_vf_vlan_filter()
9090 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_vlan_filter()
9091 return -EOPNOTSUPP; in hclge_set_vf_vlan_filter()
9095 return -EINVAL; in hclge_set_vf_vlan_filter()
9098 if (vlan > VLAN_N_VID - 1 || qos > 7) in hclge_set_vf_vlan_filter()
9099 return -EINVAL; in hclge_set_vf_vlan_filter()
9101 return -EPROTONOSUPPORT; in hclge_set_vf_vlan_filter()
9104 vport->port_base_vlan_cfg.state, in hclge_set_vf_vlan_filter()
9113 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { in hclge_set_vf_vlan_filter()
9117 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], in hclge_set_vf_vlan_filter()
9118 vport->vport_id, state, in hclge_set_vf_vlan_filter()
9133 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_clear_vf_vlan()
9134 vport = &hdev->vport[vf]; in hclge_clear_vf_vlan()
9135 vlan_info = &vport->port_base_vlan_cfg.vlan_info; in hclge_clear_vf_vlan()
9138 vport->vport_id, in hclge_clear_vf_vlan()
9139 vlan_info->vlan_tag, true); in hclge_clear_vf_vlan()
9141 dev_err(&hdev->pdev->dev, in hclge_clear_vf_vlan()
9143 vf - HCLGE_VF_VPORT_START_NUM, ret); in hclge_clear_vf_vlan()
9151 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_filter()
9159 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_set_vlan_filter()
9160 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { in hclge_set_vlan_filter()
9161 set_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
9162 return -EBUSY; in hclge_set_vlan_filter()
9171 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { in hclge_set_vlan_filter()
9172 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, in hclge_set_vlan_filter()
9188 set_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_set_vlan_filter()
9201 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_filter()
9202 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_vlan_filter()
9204 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, in hclge_sync_vlan_filter()
9208 vport->vport_id, vlan_id, in hclge_sync_vlan_filter()
9210 if (ret && ret != -EINVAL) in hclge_sync_vlan_filter()
9213 clear_bit(vlan_id, vport->vlan_del_fail_bmap); in hclge_sync_vlan_filter()
9220 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, in hclge_sync_vlan_filter()
9234 req->max_frm_size = cpu_to_le16(new_mps); in hclge_set_mac_mtu()
9235 req->min_frm_size = HCLGE_MAC_MIN_FRAME; in hclge_set_mac_mtu()
9237 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_mac_mtu()
9249 struct hclge_dev *hdev = vport->back; in hclge_set_vport_mtu()
9256 return -EINVAL; in hclge_set_vport_mtu()
9259 mutex_lock(&hdev->vport_lock); in hclge_set_vport_mtu()
9260 /* VF's mps must fit within hdev->mps */ in hclge_set_vport_mtu()
9261 if (vport->vport_id && max_frm_size > hdev->mps) { in hclge_set_vport_mtu()
9262 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
9263 return -EINVAL; in hclge_set_vport_mtu()
9264 } else if (vport->vport_id) { in hclge_set_vport_mtu()
9265 vport->mps = max_frm_size; in hclge_set_vport_mtu()
9266 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
9271 for (i = 1; i < hdev->num_alloc_vport; i++) in hclge_set_vport_mtu()
9272 if (max_frm_size < hdev->vport[i].mps) { in hclge_set_vport_mtu()
9273 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
9274 return -EINVAL; in hclge_set_vport_mtu()
9281 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
9286 hdev->mps = max_frm_size; in hclge_set_vport_mtu()
9287 vport->mps = max_frm_size; in hclge_set_vport_mtu()
9291 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
9296 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
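Editorial sketch: hclge_set_vport_mtu() above converts the requested MTU into a max frame size and range-checks it, additionally requiring every VF's frame size to fit under the PF's mps. A hedged sketch of the conversion; the header/FCS/VLAN constants below are illustrative, not the driver's exact macros:

#define DEMO_ETH_HLEN	14
#define DEMO_FCS_LEN	4
#define DEMO_VLAN_HLEN	4
#define DEMO_MIN_FRAME	64
#define DEMO_MAX_FRAME	9728

/* MTU plus L2 header, FCS and room for a double VLAN tag. */
static int demo_mtu_to_frame_size(int mtu)
{
	int frame = mtu + DEMO_ETH_HLEN + DEMO_FCS_LEN + 2 * DEMO_VLAN_HLEN;

	if (frame < DEMO_MIN_FRAME || frame > DEMO_MAX_FRAME)
		return -1;	/* out of hardware range */
	return frame;
}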
9310 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); in hclge_send_reset_tqp_cmd()
9312 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); in hclge_send_reset_tqp_cmd()
9314 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_send_reset_tqp_cmd()
9316 dev_err(&hdev->pdev->dev, in hclge_send_reset_tqp_cmd()
9333 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); in hclge_get_reset_status()
9335 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_reset_status()
9337 dev_err(&hdev->pdev->dev, in hclge_get_reset_status()
9342 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); in hclge_get_reset_status()
9350 queue = handle->kinfo.tqp[queue_id]; in hclge_covert_handle_qid_global()
9353 return tqp->index; in hclge_covert_handle_qid_global()
9359 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp()
9369 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); in hclge_reset_tqp()
9375 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
9390 dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); in hclge_reset_tqp()
9396 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
9404 struct hclge_dev *hdev = vport->back; in hclge_reset_vf_queue()
9410 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); in hclge_reset_vf_queue()
9414 dev_warn(&hdev->pdev->dev, in hclge_reset_vf_queue()
9429 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); in hclge_reset_vf_queue()
9435 dev_warn(&hdev->pdev->dev, in hclge_reset_vf_queue()
9442 struct hclge_dev *hdev = vport->back; in hclge_get_fw_version()
9444 return hdev->fw_version; in hclge_get_fw_version()
9449 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_flowctrl_adv()
9461 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) in hclge_cfg_pauseparam()
9466 dev_err(&hdev->pdev->dev, in hclge_cfg_pauseparam()
9474 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_cfg_flowctrl()
9480 if (!phydev->link || !phydev->autoneg) in hclge_cfg_flowctrl()
9483 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); in hclge_cfg_flowctrl()
9485 if (phydev->pause) in hclge_cfg_flowctrl()
9488 if (phydev->asym_pause) in hclge_cfg_flowctrl()
9496 if (phydev->duplex == HCLGE_MAC_HALF) { in hclge_cfg_flowctrl()
9508 struct hclge_dev *hdev = vport->back; in hclge_get_pauseparam()
9509 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_pauseparam()
9513 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_get_pauseparam()
9519 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { in hclge_get_pauseparam()
9522 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { in hclge_get_pauseparam()
9525 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { in hclge_get_pauseparam()
9538 hdev->fc_mode_last_time = HCLGE_FC_FULL; in hclge_record_user_pauseparam()
9540 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; in hclge_record_user_pauseparam()
9542 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; in hclge_record_user_pauseparam()
9544 hdev->fc_mode_last_time = HCLGE_FC_NONE; in hclge_record_user_pauseparam()
9546 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; in hclge_record_user_pauseparam()
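Editorial sketch: hclge_record_user_pauseparam() above collapses the two ethtool booleans into one flow-control mode before committing it to tm_info.fc_mode. The 2x2 mapping as a pure function (demo enum invented):

enum demo_fc { DEMO_FC_NONE, DEMO_FC_RX, DEMO_FC_TX, DEMO_FC_FULL };

static enum demo_fc demo_fc_mode(int rx_en, int tx_en)
{
	if (rx_en && tx_en)
		return DEMO_FC_FULL;
	if (rx_en)
		return DEMO_FC_RX;
	if (tx_en)
		return DEMO_FC_TX;
	return DEMO_FC_NONE;
}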
9553 struct hclge_dev *hdev = vport->back; in hclge_set_pauseparam()
9554 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_pauseparam()
9560 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
9561 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); in hclge_set_pauseparam()
9562 return -EOPNOTSUPP; in hclge_set_pauseparam()
9566 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_set_pauseparam()
9567 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
9569 return -EOPNOTSUPP; in hclge_set_pauseparam()
9582 return -EOPNOTSUPP; in hclge_set_pauseparam()
9589 struct hclge_dev *hdev = vport->back; in hclge_get_ksettings_an_result()
9592 *speed = hdev->hw.mac.speed; in hclge_get_ksettings_an_result()
9594 *duplex = hdev->hw.mac.duplex; in hclge_get_ksettings_an_result()
9596 *auto_neg = hdev->hw.mac.autoneg; in hclge_get_ksettings_an_result()
9603 struct hclge_dev *hdev = vport->back; in hclge_get_media_type()
9612 *media_type = hdev->hw.mac.media_type; in hclge_get_media_type()
9615 *module_type = hdev->hw.mac.module_type; in hclge_get_media_type()
9622 struct hclge_dev *hdev = vport->back; in hclge_get_mdix_mode()
9623 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mdix_mode()
9670 struct device *dev = &hdev->pdev->dev; in hclge_info_show()
9674 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); in hclge_info_show()
9675 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); in hclge_info_show()
9676 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); in hclge_info_show()
9677 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); in hclge_info_show()
9678 dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport); in hclge_info_show()
9679 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); in hclge_info_show()
9680 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); in hclge_info_show()
9681 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); in hclge_info_show()
9682 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); in hclge_info_show()
9683 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); in hclge_info_show()
9685 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); in hclge_info_show()
9687 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); in hclge_info_show()
9689 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable"); in hclge_info_show()
9697 struct hnae3_client *client = vport->nic.client; in hclge_init_nic_client_instance()
9698 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_nic_client_instance()
9699 int rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_nic_client_instance()
9702 ret = client->ops->init_instance(&vport->nic); in hclge_init_nic_client_instance()
9706 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
9707 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_nic_client_instance()
9708 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_nic_client_instance()
9709 ret = -EBUSY; in hclge_init_nic_client_instance()
9716 dev_err(&ae_dev->pdev->dev, in hclge_init_nic_client_instance()
9723 if (netif_msg_drv(&hdev->vport->nic)) in hclge_init_nic_client_instance()
9729 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
9730 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_nic_client_instance()
9733 client->ops->uninit_instance(&vport->nic, 0); in hclge_init_nic_client_instance()
9741 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_roce_client_instance()
9746 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclge_init_roce_client_instance()
9747 !hdev->nic_client) in hclge_init_roce_client_instance()
9750 client = hdev->roce_client; in hclge_init_roce_client_instance()
9755 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_roce_client_instance()
9756 ret = client->ops->init_instance(&vport->roce); in hclge_init_roce_client_instance()
9760 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
9761 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_roce_client_instance()
9762 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_roce_client_instance()
9763 ret = -EBUSY; in hclge_init_roce_client_instance()
9770 dev_err(&ae_dev->pdev->dev, in hclge_init_roce_client_instance()
9780 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
9781 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_roce_client_instance()
9784 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_init_roce_client_instance()
9792 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_client_instance()
9796 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_init_client_instance()
9797 vport = &hdev->vport[i]; in hclge_init_client_instance()
9799 switch (client->type) { in hclge_init_client_instance()
9801 hdev->nic_client = client; in hclge_init_client_instance()
9802 vport->nic.client = client; in hclge_init_client_instance()
9814 hdev->roce_client = client; in hclge_init_client_instance()
9815 vport->roce.client = client; in hclge_init_client_instance()
9824 return -EINVAL; in hclge_init_client_instance()
9831 hdev->nic_client = NULL; in hclge_init_client_instance()
9832 vport->nic.client = NULL; in hclge_init_client_instance()
9835 hdev->roce_client = NULL; in hclge_init_client_instance()
9836 vport->roce.client = NULL; in hclge_init_client_instance()
9843 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_client_instance()
9847 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_uninit_client_instance()
9848 vport = &hdev->vport[i]; in hclge_uninit_client_instance()
9849 if (hdev->roce_client) { in hclge_uninit_client_instance()
9850 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
9851 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
9854 hdev->roce_client->ops->uninit_instance(&vport->roce, in hclge_uninit_client_instance()
9856 hdev->roce_client = NULL; in hclge_uninit_client_instance()
9857 vport->roce.client = NULL; in hclge_uninit_client_instance()
9859 if (client->type == HNAE3_CLIENT_ROCE) in hclge_uninit_client_instance()
9861 if (hdev->nic_client && client->ops->uninit_instance) { in hclge_uninit_client_instance()
9862 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
9863 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
9866 client->ops->uninit_instance(&vport->nic, 0); in hclge_uninit_client_instance()
9867 hdev->nic_client = NULL; in hclge_uninit_client_instance()
9868 vport->nic.client = NULL; in hclge_uninit_client_instance()
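/*
 * Teardown order matters here: the RoCE instance is unregistered before
 * the NIC instance it depends on, and both paths first clear their
 * REGISTERED bit and wait until any in-flight reset
 * (HCLGE_STATE_RST_HANDLING) has drained before calling
 * uninit_instance().
 */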
9875 struct pci_dev *pdev = hdev->pdev; in hclge_pci_init()
9881 dev_err(&pdev->dev, "failed to enable PCI device\n"); in hclge_pci_init()
9885 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in hclge_pci_init()
9887 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in hclge_pci_init()
9889 dev_err(&pdev->dev, in hclge_pci_init()
9893 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); in hclge_pci_init()
9898 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); in hclge_pci_init()
9903 hw = &hdev->hw; in hclge_pci_init()
9904 hw->io_base = pcim_iomap(pdev, 2, 0); in hclge_pci_init()
9905 if (!hw->io_base) { in hclge_pci_init()
9906 dev_err(&pdev->dev, "Can't map configuration register space\n"); in hclge_pci_init()
9907 ret = -ENOMEM; in hclge_pci_init()
9911 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); in hclge_pci_init()
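/*
 * The probe path above tries a 64-bit DMA mask first and only falls
 * back to 32 bits (with a warning) when the wider mask is rejected.  A
 * runnable user-space model of that fallback, with hypothetical names:
 */
#include <stdio.h>

static int try_dma_mask(int bits)
{
        return bits <= 32 ? 0 : -1;     /* pretend only 32-bit DMA works */
}

static int set_dma_mask_with_fallback(void)
{
        if (try_dma_mask(64) == 0)
                return 64;
        if (try_dma_mask(32) == 0) {
                fprintf(stderr, "set DMA mask to 32 bits\n");
                return 32;
        }
        return -1;
}

int main(void)
{
        printf("DMA mask: %d bits\n", set_dma_mask_with_fallback());
        return 0;
}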
9925 struct pci_dev *pdev = hdev->pdev; in hclge_pci_uninit()
9927 pcim_iounmap(pdev, hdev->hw.io_base); in hclge_pci_uninit()
9936 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); in hclge_state_init()
9937 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_init()
9938 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_state_init()
9939 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_state_init()
9940 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_state_init()
9941 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_state_init()
9942 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_state_init()
9947 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_uninit()
9948 set_bit(HCLGE_STATE_REMOVING, &hdev->state); in hclge_state_uninit()
9950 if (hdev->reset_timer.function) in hclge_state_uninit()
9951 del_timer_sync(&hdev->reset_timer); in hclge_state_uninit()
9952 if (hdev->service_task.work.func) in hclge_state_uninit()
9953 cancel_delayed_work_sync(&hdev->service_task); in hclge_state_uninit()
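/*
 * The uninit path only cancels what was actually set up: the
 * reset_timer.function and service_task.work.func checks make this
 * teardown safe to call even when probe failed before timer_setup() or
 * INIT_DELAYED_WORK() ever ran.
 */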
9961 struct hclge_dev *hdev = ae_dev->priv; in hclge_flr_prepare()
9966 down(&hdev->reset_sem); in hclge_flr_prepare()
9967 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_flr_prepare()
9968 hdev->reset_type = HNAE3_FLR_RESET; in hclge_flr_prepare()
9970 if (ret || hdev->reset_pending) { in hclge_flr_prepare()
9971 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", in hclge_flr_prepare()
9973 if (hdev->reset_pending || in hclge_flr_prepare()
9975 dev_err(&hdev->pdev->dev, in hclge_flr_prepare()
9977 hdev->reset_pending, retry_cnt); in hclge_flr_prepare()
9978 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_flr_prepare()
9979 up(&hdev->reset_sem); in hclge_flr_prepare()
9986 hclge_enable_vector(&hdev->misc_vector, false); in hclge_flr_prepare()
9987 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_flr_prepare()
9988 hdev->rst_stats.flr_rst_cnt++; in hclge_flr_prepare()
9993 struct hclge_dev *hdev = ae_dev->priv; in hclge_flr_done()
9996 hclge_enable_vector(&hdev->misc_vector, true); in hclge_flr_done()
10000 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); in hclge_flr_done()
10002 hdev->reset_type = HNAE3_NONE_RESET; in hclge_flr_done()
10003 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_flr_done()
10004 up(&hdev->reset_sem); in hclge_flr_done()
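/*
 * FLR handling is a prepare/done bracket: prepare takes reset_sem,
 * marks the reset as in progress and quiesces the device (misc vector
 * off, command queue disabled); done rebuilds and releases the
 * semaphore.  A minimal runnable user-space sketch of the bracket,
 * with hypothetical names:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reset_sem = PTHREAD_MUTEX_INITIALIZER;
static bool rst_handling;

static void flr_prepare(void)
{
        pthread_mutex_lock(&reset_sem); /* held across the whole FLR */
        rst_handling = true;
        /* quiesce: disable vectors, stop the command queue, ... */
}

static void flr_done(void)
{
        /* rebuild: re-enable vectors, reinitialize the function, ... */
        rst_handling = false;
        pthread_mutex_unlock(&reset_sem);
}

int main(void)
{
        flr_prepare();
        printf("device quiesced: %d\n", rst_handling);
        flr_done();
        return 0;
}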
10011 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_clear_resetting_state()
10012 struct hclge_vport *vport = &hdev->vport[i]; in hclge_clear_resetting_state()
10016 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); in hclge_clear_resetting_state()
10018 dev_warn(&hdev->pdev->dev, in hclge_clear_resetting_state()
10020 vport->vport_id, ret); in hclge_clear_resetting_state()
10026 struct pci_dev *pdev = ae_dev->pdev; in hclge_init_ae_dev()
10030 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclge_init_ae_dev()
10032 return -ENOMEM; in hclge_init_ae_dev()
10034 hdev->pdev = pdev; in hclge_init_ae_dev()
10035 hdev->ae_dev = ae_dev; in hclge_init_ae_dev()
10036 hdev->reset_type = HNAE3_NONE_RESET; in hclge_init_ae_dev()
10037 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_init_ae_dev()
10038 ae_dev->priv = hdev; in hclge_init_ae_dev()
10041 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; in hclge_init_ae_dev()
10043 mutex_init(&hdev->vport_lock); in hclge_init_ae_dev()
10044 spin_lock_init(&hdev->fd_rule_lock); in hclge_init_ae_dev()
10045 sema_init(&hdev->reset_sem, 1); in hclge_init_ae_dev()
10067 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n", in hclge_init_ae_dev()
10074 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); in hclge_init_ae_dev()
10080 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); in hclge_init_ae_dev()
10090 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); in hclge_init_ae_dev()
10102 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { in hclge_init_ae_dev()
10114 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); in hclge_init_ae_dev()
10120 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); in hclge_init_ae_dev()
10130 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); in hclge_init_ae_dev()
10136 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); in hclge_init_ae_dev()
10143 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); in hclge_init_ae_dev()
10149 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); in hclge_init_ae_dev()
10155 dev_err(&pdev->dev, in hclge_init_ae_dev()
10160 INIT_KFIFO(hdev->mac_tnl_log); in hclge_init_ae_dev()
10164 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); in hclge_init_ae_dev()
10165 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); in hclge_init_ae_dev()
10181 if (ae_dev->hw_err_reset_req) { in hclge_init_ae_dev()
10185 &ae_dev->hw_err_reset_req); in hclge_init_ae_dev()
10187 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_init_ae_dev()
10191 hclge_enable_vector(&hdev->misc_vector, true); in hclge_init_ae_dev()
10194 hdev->last_reset_time = jiffies; in hclge_init_ae_dev()
10196 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", in hclge_init_ae_dev()
10204 if (hdev->hw.mac.phydev) in hclge_init_ae_dev()
10205 mdiobus_unregister(hdev->hw.mac.mdio_bus); in hclge_init_ae_dev()
10213 pcim_iounmap(pdev, hdev->hw.io_base); in hclge_init_ae_dev()
10218 mutex_destroy(&hdev->vport_lock); in hclge_init_ae_dev()
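/*
 * The error labels above form the usual kernel goto-unwind ladder: each
 * failure jumps to the label that releases everything acquired so far,
 * in reverse order of acquisition.  A runnable user-space sketch of the
 * shape, with hypothetical resources:
 */
#include <stdio.h>
#include <stdlib.h>

static char *a, *b;

static int init_all(void)
{
        a = malloc(16);
        if (!a)
                goto err_out;
        b = malloc(16);
        if (!b)
                goto err_free_a;
        /* later steps would jump to a label freeing b, then a, ... */
        return 0;

err_free_a:
        free(a);
err_out:
        return -1;
}

static void uninit_all(void)
{
        free(b);
        free(a);
}

int main(void)
{
        int ret = init_all();

        printf("init_all: %d\n", ret);
        if (!ret)
                uninit_all();
        return 0;
}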
10224 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); in hclge_stats_clear()
10246 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
10254 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
10265 struct hclge_dev *hdev = vport->back; in hclge_set_vf_spoofchk()
10269 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_spoofchk()
10270 return -EOPNOTSUPP; in hclge_set_vf_spoofchk()
10274 return -EINVAL; in hclge_set_vf_spoofchk()
10276 if (vport->vf_info.spoofchk == new_spoofchk) in hclge_set_vf_spoofchk()
10279 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) in hclge_set_vf_spoofchk()
10280 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
10284 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
10288 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); in hclge_set_vf_spoofchk()
10292 vport->vf_info.spoofchk = new_spoofchk; in hclge_set_vf_spoofchk()
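/*
 * The spoof-check setter above follows a common shape: gate on hardware
 * capability (dev_version >= V2), warn when existing VF state (for
 * example a full VF VLAN table) can make the new setting unreliable,
 * program the hardware, and cache the new value in vf_info.spoofchk
 * only after the hardware write succeeded.
 */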
10298 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_spoofchk()
10302 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_reset_vport_spoofchk()
10306 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_spoofchk()
10307 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, in hclge_reset_vport_spoofchk()
10308 vport->vf_info.spoofchk); in hclge_reset_vport_spoofchk()
10321 struct hclge_dev *hdev = vport->back; in hclge_set_vf_trust()
10322 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_set_vf_trust()
10329 return -EINVAL; in hclge_set_vf_trust()
10331 if (vport->vf_info.trusted == new_trusted) in hclge_set_vf_trust()
10335 if (!enable && vport->vf_info.promisc_enable) { in hclge_set_vf_trust()
10336 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; in hclge_set_vf_trust()
10341 vport->vf_info.promisc_enable = 0; in hclge_set_vf_trust()
10345 vport->vf_info.trusted = new_trusted; in hclge_set_vf_trust()
10356 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_reset_vf_rate()
10357 struct hclge_vport *vport = &hdev->vport[vf]; in hclge_reset_vf_rate()
10359 vport->vf_info.max_tx_rate = 0; in hclge_reset_vf_rate()
10360 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); in hclge_reset_vf_rate()
10362 dev_err(&hdev->pdev->dev, in hclge_reset_vf_rate()
10364 vf - HCLGE_VF_VPORT_START_NUM, ret); in hclge_reset_vf_rate()
10372 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { in hclge_vf_rate_param_check()
10373 dev_err(&hdev->pdev->dev, in hclge_vf_rate_param_check()
10375 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); in hclge_vf_rate_param_check()
10376 return -EINVAL; in hclge_vf_rate_param_check()
10386 struct hclge_dev *hdev = vport->back; in hclge_set_vf_rate()
10395 return -EINVAL; in hclge_set_vf_rate()
10397 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) in hclge_set_vf_rate()
10404 vport->vf_info.max_tx_rate = max_tx_rate; in hclge_set_vf_rate()
10411 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_resume_vf_rate()
10417 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { in hclge_resume_vf_rate()
10420 return -EINVAL; in hclge_resume_vf_rate()
10425 if (!vport->vf_info.max_tx_rate) in hclge_resume_vf_rate()
10429 vport->vf_info.max_tx_rate, true); in hclge_resume_vf_rate()
10431 dev_err(&hdev->pdev->dev, in hclge_resume_vf_rate()
10433 vf, vport->vf_info.max_tx_rate, ret); in hclge_resume_vf_rate()
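/*
 * After a reset the shaper configuration in hardware is gone, so the
 * resume path above walks pci_num_vf() VFs and replays each cached
 * vf_info.max_tx_rate (skipping VFs with no limit configured), passing
 * force=true so the rate is rewritten even if it appears unchanged.
 */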
10443 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_state()
10446 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_state()
10454 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_ae_dev()
10455 struct pci_dev *pdev = ae_dev->pdev; in hclge_reset_ae_dev()
10458 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_reset_ae_dev()
10464 if (hdev->reset_type == HNAE3_IMP_RESET || in hclge_reset_ae_dev()
10465 hdev->reset_type == HNAE3_GLOBAL_RESET) { in hclge_reset_ae_dev()
10466 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); in hclge_reset_ae_dev()
10467 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); in hclge_reset_ae_dev()
10468 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); in hclge_reset_ae_dev()
10474 dev_err(&pdev->dev, "Cmd queue init failed\n"); in hclge_reset_ae_dev()
10480 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); in hclge_reset_ae_dev()
10486 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); in hclge_reset_ae_dev()
10492 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); in hclge_reset_ae_dev()
10502 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); in hclge_reset_ae_dev()
10508 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); in hclge_reset_ae_dev()
10514 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); in hclge_reset_ae_dev()
10520 dev_err(&pdev->dev, in hclge_reset_ae_dev()
10527 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); in hclge_reset_ae_dev()
10534 /* Re-enable the hw error interrupts because in hclge_reset_ae_dev()
10539 dev_err(&pdev->dev, in hclge_reset_ae_dev()
10540 "fail(%d) to re-enable NIC hw error interrupts\n", in hclge_reset_ae_dev()
10545 if (hdev->roce_client) { in hclge_reset_ae_dev()
10548 dev_err(&pdev->dev, in hclge_reset_ae_dev()
10549 "fail(%d) to re-enable roce ras interrupts\n", in hclge_reset_ae_dev()
10564 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", in hclge_reset_ae_dev()
10572 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_ae_dev()
10573 struct hclge_mac *mac = &hdev->hw.mac; in hclge_uninit_ae_dev()
10581 if (mac->phydev) in hclge_uninit_ae_dev()
10582 mdiobus_unregister(mac->mdio_bus); in hclge_uninit_ae_dev()
10585 hclge_enable_vector(&hdev->misc_vector, false); in hclge_uninit_ae_dev()
10586 synchronize_irq(hdev->misc_vector.vector_irq); in hclge_uninit_ae_dev()
10596 mutex_destroy(&hdev->vport_lock); in hclge_uninit_ae_dev()
10598 ae_dev->priv = NULL; in hclge_uninit_ae_dev()
10603 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hclge_get_max_channels()
10605 struct hclge_dev *hdev = vport->back; in hclge_get_max_channels()
10607 return min_t(u32, hdev->rss_size_max, in hclge_get_max_channels()
10608 vport->alloc_tqps / kinfo->num_tc); in hclge_get_max_channels()
10614 ch->max_combined = hclge_get_max_channels(handle); in hclge_get_channels()
10615 ch->other_count = 1; in hclge_get_channels()
10616 ch->max_other = 1; in hclge_get_channels()
10617 ch->combined_count = handle->kinfo.rss_size; in hclge_get_channels()
10624 struct hclge_dev *hdev = vport->back; in hclge_get_tqps_and_rss_info()
10626 *alloc_tqps = vport->alloc_tqps; in hclge_get_tqps_and_rss_info()
10627 *max_rss_size = hdev->rss_size_max; in hclge_get_tqps_and_rss_info()
10634 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; in hclge_set_channels()
10636 struct hclge_dev *hdev = vport->back; in hclge_set_channels()
10638 u16 cur_rss_size = kinfo->rss_size; in hclge_set_channels()
10639 u16 cur_tqps = kinfo->num_tqps; in hclge_set_channels()
10646 kinfo->req_rss_size = new_tqps_num; in hclge_set_channels()
10650 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); in hclge_set_channels()
10654 roundup_size = roundup_pow_of_two(kinfo->rss_size); in hclge_set_channels()
10660 if (!(hdev->hw_tc_map & BIT(i))) in hclge_set_channels()
10665 tc_offset[i] = kinfo->rss_size * i; in hclge_set_channels()
10678 return -ENOMEM; in hclge_set_channels()
10681 rss_indir[i] = i % kinfo->rss_size; in hclge_set_channels()
10685 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclge_set_channels()
10692 dev_info(&hdev->pdev->dev, in hclge_set_channels()
10694 cur_rss_size, kinfo->rss_size, in hclge_set_channels()
10695 cur_tqps, kinfo->rss_size * kinfo->num_tc); in hclge_set_channels()
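/*
 * After the queue count changes, the RSS indirection table is refilled
 * round-robin over the new rss_size (rss_indir[i] = i % rss_size), so
 * hash buckets spread evenly across the resized queue set.  A runnable
 * sketch of that fill with assumed sizes:
 */
#include <stdio.h>

#define INDIR_SIZE 512                   /* table size; device-specific */

int main(void)
{
        unsigned int rss_indir[INDIR_SIZE];
        unsigned int rss_size = 6;       /* example new queue count */
        unsigned int hits[16] = { 0 };
        unsigned int i;

        for (i = 0; i < INDIR_SIZE; i++) {
                rss_indir[i] = i % rss_size;
                hits[rss_indir[i]]++;
        }

        for (i = 0; i < rss_size; i++)
                printf("queue %u: %u table slots\n", i, hits[i]);
        return 0;
}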
10708 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_regs_num()
10710 dev_err(&hdev->pdev->dev, in hclge_get_regs_num()
10720 return -EINVAL; in hclge_get_regs_num()
10747 return -ENOMEM; in hclge_get_32_bit_regs()
10750 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); in hclge_get_32_bit_regs()
10752 dev_err(&hdev->pdev->dev, in hclge_get_32_bit_regs()
10761 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; in hclge_get_32_bit_regs()
10769 regs_num--; in hclge_get_32_bit_regs()
10801 return -ENOMEM; in hclge_get_64_bit_regs()
10804 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); in hclge_get_64_bit_regs()
10806 dev_err(&hdev->pdev->dev, in hclge_get_64_bit_regs()
10815 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; in hclge_get_64_bit_regs()
10823 regs_num--; in hclge_get_64_bit_regs()
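/*
 * Both register dumps above use multi-descriptor command chains: the
 * first descriptor's payload is shorter because its leading words carry
 * metadata (the nodata_num/nodata_len offsets subtracted from the
 * per-descriptor data count), which is why the copy loops special-case
 * descriptor 0 before draining full descriptors as regs_num counts down.
 */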
10846 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) { in hclge_query_bd_num_cmd_send()
10855 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT); in hclge_query_bd_num_cmd_send()
10868 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg_bd_num()
10892 for (i = 0; i < bd_num - 1; i++) { in hclge_dfx_reg_cmd_send()
10893 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); in hclge_dfx_reg_cmd_send()
10899 ret = hclge_cmd_send(&hdev->hw, desc, bd_num); in hclge_dfx_reg_cmd_send()
10901 dev_err(&hdev->pdev->dev, in hclge_dfx_reg_cmd_send()
10915 entries_per_desc = ARRAY_SIZE(desc->data); in hclge_dfx_reg_fetch_data()
10917 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK); in hclge_dfx_reg_fetch_data()
10939 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg_len()
10966 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg()
10978 return -ENOMEM; in hclge_get_dfx_reg()
10985 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg()
11007 /* fetching per-PF register values from PF PCIe register space */ in hclge_fetch_pf_reg()
11009 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); in hclge_fetch_pf_reg()
11011 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); in hclge_fetch_pf_reg()
11017 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); in hclge_fetch_pf_reg()
11019 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); in hclge_fetch_pf_reg()
11025 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); in hclge_fetch_pf_reg()
11026 for (j = 0; j < kinfo->num_tqps; j++) { in hclge_fetch_pf_reg()
11028 *reg++ = hclge_read_dev(&hdev->hw, in hclge_fetch_pf_reg()
11034 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps; in hclge_fetch_pf_reg()
11037 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); in hclge_fetch_pf_reg()
11038 for (j = 0; j < hdev->num_msi_used - 1; j++) { in hclge_fetch_pf_reg()
11040 *reg++ = hclge_read_dev(&hdev->hw, in hclge_fetch_pf_reg()
11046 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); in hclge_fetch_pf_reg()
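/*
 * The register snapshot is laid out in fixed-width lines: after each
 * register group, separator words pad the group, computed as
 * MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK).  A runnable
 * sketch of that arithmetic with assumed values (4 words per line);
 * note an already-aligned count still gets a full separator group:
 */
#include <stdio.h>

#define MAX_SEPARATE_NUM        4       /* assumed line width */
#define REG_NUM_REMAIN_MASK     3       /* line width - 1 (power of two) */

int main(void)
{
        unsigned int reg_num;

        for (reg_num = 1; reg_num <= 8; reg_num++) {
                unsigned int sep = MAX_SEPARATE_NUM -
                                   (reg_num & REG_NUM_REMAIN_MASK);
                printf("reg_num=%u -> %u separator words (total %u)\n",
                       reg_num, sep, reg_num + sep);
        }
        return 0;
}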
11054 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hclge_get_regs_len()
11056 struct hclge_dev *hdev = vport->back; in hclge_get_regs_len()
11063 dev_err(&hdev->pdev->dev, in hclge_get_regs_len()
11070 dev_err(&hdev->pdev->dev, in hclge_get_regs_len()
11088 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + in hclge_get_regs_len()
11089 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + in hclge_get_regs_len()
11096 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hclge_get_regs()
11098 struct hclge_dev *hdev = vport->back; in hclge_get_regs()
11103 *version = hdev->fw_version; in hclge_get_regs()
11107 dev_err(&hdev->pdev->dev, in hclge_get_regs()
11116 dev_err(&hdev->pdev->dev, in hclge_get_regs()
11122 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); in hclge_get_regs()
11128 dev_err(&hdev->pdev->dev, in hclge_get_regs()
11134 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); in hclge_get_regs()
11140 dev_err(&hdev->pdev->dev, in hclge_get_regs()
11153 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, in hclge_set_led_status()
11156 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_led_status()
11158 dev_err(&hdev->pdev->dev, in hclge_set_led_status()
11174 struct hclge_dev *hdev = vport->back; in hclge_set_led_id()
11182 return -EINVAL; in hclge_set_led_id()
11192 struct hclge_dev *hdev = vport->back; in hclge_get_link_mode()
11196 supported[idx] = hdev->hw.mac.supported[idx]; in hclge_get_link_mode()
11197 advertising[idx] = hdev->hw.mac.advertising[idx]; in hclge_get_link_mode()
11204 struct hclge_dev *hdev = vport->back; in hclge_gro_en()
11211 struct hclge_vport *vport = &hdev->vport[0]; in hclge_sync_promisc_mode()
11212 struct hnae3_handle *handle = &vport->nic; in hclge_sync_promisc_mode()
11216 if (vport->last_promisc_flags != vport->overflow_promisc_flags) { in hclge_sync_promisc_mode()
11217 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); in hclge_sync_promisc_mode()
11218 vport->last_promisc_flags = vport->overflow_promisc_flags; in hclge_sync_promisc_mode()
11221 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) { in hclge_sync_promisc_mode()
11222 tmp_flags = handle->netdev_flags | vport->last_promisc_flags; in hclge_sync_promisc_mode()
11226 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); in hclge_sync_promisc_mode()
11240 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_module_existed()
11242 dev_err(&hdev->pdev->dev, in hclge_module_existed()
11271 if (i < HCLGE_SFP_INFO_CMD_NUM - 1) in hclge_get_sfp_eeprom_info()
11277 sfp_info_bd0->offset = cpu_to_le16((u16)offset); in hclge_get_sfp_eeprom_info()
11279 sfp_info_bd0->read_len = cpu_to_le16(read_len); in hclge_get_sfp_eeprom_info()
11281 ret = hclge_cmd_send(&hdev->hw, desc, i); in hclge_get_sfp_eeprom_info()
11283 dev_err(&hdev->pdev->dev, in hclge_get_sfp_eeprom_info()
11290 memcpy(data, sfp_info_bd0->data, copy_len); in hclge_get_sfp_eeprom_info()
11298 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); in hclge_get_sfp_eeprom_info()
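/*
 * The SFP EEPROM read above is a classic chunked transfer: each firmware
 * command moves at most a fixed number of bytes, and the tail chunk is
 * clamped with min_t(len - read_len, chunk size).  A runnable user-space
 * model of the loop (hypothetical sizes and names):
 */
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 24                   /* assumed per-command payload */

static unsigned int read_chunk(char *dst, unsigned int off, unsigned int n)
{
        /* fake EEPROM contents: one letter per chunk index */
        memset(dst, 'A' + (off / CHUNK_SIZE) % 26, n);
        return n;
}

int main(void)
{
        char data[100];
        unsigned int len = sizeof(data), read_len = 0;

        while (read_len < len) {
                unsigned int copy_len = len - read_len;

                if (copy_len > CHUNK_SIZE)      /* clamp the tail chunk */
                        copy_len = CHUNK_SIZE;
                read_len += read_chunk(data + read_len, read_len, copy_len);
        }
        printf("read %u bytes, first=%c last=%c\n", read_len,
               data[0], data[len - 1]);
        return 0;
}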
11310 struct hclge_dev *hdev = vport->back; in hclge_get_module_eeprom()
11314 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) in hclge_get_module_eeprom()
11315 return -EOPNOTSUPP; in hclge_get_module_eeprom()
11318 return -ENXIO; in hclge_get_module_eeprom()
11323 len - read_len, in hclge_get_module_eeprom()
11326 return -EIO; in hclge_get_module_eeprom()
11442 return -ENOMEM; in hclge_init()