Lines Matching +full:75 +full:- +full:ec

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
14 * struct i40e_stats - definition for an ethtool statistic
15 * @stat_string: statistic name to display in ethtool -S output
63 I40E_QUEUE_STAT("%s-%u.packets", stats.packets),
64 I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes),
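The descriptor type behind these entries is only referenced indirectly in this listing; below is a minimal sketch of what struct i40e_stats and the I40E_QUEUE_STAT() helper look like, assuming the conventional sizeof_field()/offsetof() pattern used by Intel Ethernet drivers. The field names come from the lines further down; the macro body is an assumption, not the verbatim driver definition.

struct i40e_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
};

#define I40E_QUEUE_STAT(_name, _stat) {                               \
        .stat_string = _name,                                         \
        .sizeof_stat = sizeof_field(struct i40e_ring, _stat),         \
        .stat_offset = offsetof(struct i40e_ring, _stat),             \
}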
68 * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
91 p = (char *)pointer + stat->stat_offset; in i40e_add_one_ethtool_stat()
92 switch (stat->sizeof_stat) { in i40e_add_one_ethtool_stat()
107 stat->stat_string); in i40e_add_one_ethtool_stat()
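A self-contained sketch of the copy-by-size pattern visible above: the descriptor's offset is applied to the source structure and the value is widened to u64 for the ethtool buffer. Simplified (the real helper also tolerates a NULL source pointer and then reports zero); the function name here is illustrative.

static void i40e_copy_one_stat(u64 *data, const void *pointer,
                               const struct i40e_stats *stat)
{
        const char *p = (const char *)pointer + stat->stat_offset;

        switch (stat->sizeof_stat) {
        case sizeof(u64): *data = *(const u64 *)p; break;
        case sizeof(u32): *data = *(const u32 *)p; break;
        case sizeof(u16): *data = *(const u16 *)p; break;
        case sizeof(u8):  *data = *(const u8 *)p;  break;
        default:
                WARN_ONCE(1, "unexpected stat size for %s",
                          stat->stat_string);
                *data = 0;
        }
}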
113 * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
137 * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
153 * i40e_add_queue_stats - copy queue statistics into supplied buffer
177 * non-null before attempting to access its syncp. in i40e_add_queue_stats()
180 start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); in i40e_add_queue_stats()
185 } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); in i40e_add_queue_stats()
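The do/while above is the standard u64_stats retry pattern: re-read the counters until no writer touched the syncp in between, and treat a missing ring as all-zero stats. A simplified standalone version (the real loop copies every descriptor in the queue stats array, not just packets and bytes):

static void i40e_read_ring_stats(struct i40e_ring *ring,
                                 u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
                *packets = ring ? ring->stats.packets : 0;
                *bytes = ring ? ring->stats.bytes : 0;
        } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
}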
192 * __i40e_add_stat_strings - copy stat strings into ethtool buffer
216 * i40e_add_stat_strings - copy stat strings into ethtool buffer
313 * The PF_STATs are appended to the netdev stats only when ethtool -S
446 I40E_PRIV_FLAG("total-port-shutdown",
449 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENA, 0),
450 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENA, 0),
451 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENA, 0),
452 I40E_PRIV_FLAG("link-down-on-close",
454 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX_ENA, 0),
455 I40E_PRIV_FLAG("disable-source-pruning",
457 I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_FW_LLDP_DIS, 0),
458 I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
459 I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
460 I40E_PRIV_FLAG("vf-vlan-pruning",
468 I40E_PRIV_FLAG("vf-true-promisc-support",
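These entries are what ethtool --show-priv-flags <dev> lists and ethtool --set-priv-flags <dev> <flag> on|off toggles. A sketch of the descriptor behind I40E_PRIV_FLAG(); only the (name, flag) pairing is taken from the listing, the struct layout and read_only handling are assumptions.

struct i40e_priv_flags {
        char flag_string[ETH_GSTRING_LEN];
        u8 flag;                /* bit index into pf->flags */
        bool read_only;
};

#define I40E_PRIV_FLAG(_name, _flag, _read_only) {    \
        .flag_string = _name,                         \
        .flag = _flag,                                \
        .read_only = _read_only,                      \
}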
475 * i40e_partition_setting_complaint - generic complaint for MFP restriction
480 dev_info(&pf->pdev->dev, in i40e_partition_setting_complaint()
485 * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
493 struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info; in i40e_phy_type_to_ethtool()
494 u64 phy_types = pf->hw.phy.phy_types; in i40e_phy_type_to_ethtool()
502 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
505 if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { in i40e_phy_type_to_ethtool()
519 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
526 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
533 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB) in i40e_phy_type_to_ethtool()
540 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB) in i40e_phy_type_to_ethtool()
553 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) in i40e_phy_type_to_ethtool()
560 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) in i40e_phy_type_to_ethtool()
567 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
592 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) in i40e_phy_type_to_ethtool()
599 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
604 !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { in i40e_phy_type_to_ethtool()
607 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
612 !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { in i40e_phy_type_to_ethtool()
615 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
623 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
630 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
638 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
646 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
659 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) { in i40e_phy_type_to_ethtool()
673 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
680 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
687 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
696 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
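Each requested_speeds check above pairs with ethtool_link_ksettings_add_link_mode() calls that the search elided: the supported mode is always added for a detected PHY type, while the matching advertised mode is added only when firmware reports that speed as requested. An illustrative fragment for one 10GBASE-T case (mode names vary per PHY type):

static void i40e_add_10g_t_modes(struct i40e_link_status *hw_link_info,
                                 struct ethtool_link_ksettings *ks)
{
        ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full);
        if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     10000baseT_Full);
}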
733 * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
763 * i40e_get_settings_link_up - Get the Link settings for when link is up
774 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_get_settings_link_up()
776 u32 link_speed = hw_link_info->link_speed; in i40e_get_settings_link_up()
779 switch (hw_link_info->phy_type) { in i40e_get_settings_link_up()
821 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
836 if (hw_link_info->module_type[2] & in i40e_get_settings_link_up()
838 hw_link_info->module_type[2] & in i40e_get_settings_link_up()
842 if (hw_link_info->requested_speeds & in i40e_get_settings_link_up()
847 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_get_settings_link_up()
868 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_get_settings_link_up()
871 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB) in i40e_get_settings_link_up()
874 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB) in i40e_get_settings_link_up()
877 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_get_settings_link_up()
880 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) in i40e_get_settings_link_up()
908 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_get_settings_link_up()
911 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
917 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_get_settings_link_up()
920 if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { in i40e_get_settings_link_up()
923 if (hw_link_info->requested_speeds & in i40e_get_settings_link_up()
952 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
970 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
981 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
992 hw_link_info->phy_type); in i40e_get_settings_link_up()
1006 ks->base.speed = SPEED_40000; in i40e_get_settings_link_up()
1009 ks->base.speed = SPEED_25000; in i40e_get_settings_link_up()
1012 ks->base.speed = SPEED_20000; in i40e_get_settings_link_up()
1015 ks->base.speed = SPEED_10000; in i40e_get_settings_link_up()
1018 ks->base.speed = SPEED_5000; in i40e_get_settings_link_up()
1021 ks->base.speed = SPEED_2500; in i40e_get_settings_link_up()
1024 ks->base.speed = SPEED_1000; in i40e_get_settings_link_up()
1027 ks->base.speed = SPEED_100; in i40e_get_settings_link_up()
1030 ks->base.speed = SPEED_UNKNOWN; in i40e_get_settings_link_up()
1033 ks->base.duplex = DUPLEX_FULL; in i40e_get_settings_link_up()
1037 * i40e_get_settings_link_down - Get the Link settings for when link is down
1054 ks->base.speed = SPEED_UNKNOWN; in i40e_get_settings_link_down()
1055 ks->base.duplex = DUPLEX_UNKNOWN; in i40e_get_settings_link_down()
1059 * i40e_get_link_ksettings - Get Link Speed and Duplex settings
1069 struct i40e_pf *pf = np->vsi->back; in i40e_get_link_ksettings()
1070 struct i40e_hw *hw = &pf->hw; in i40e_get_link_ksettings()
1071 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_get_link_ksettings()
1072 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; in i40e_get_link_ksettings()
1084 ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? in i40e_get_link_ksettings()
1088 switch (hw->phy.media_type) { in i40e_get_link_ksettings()
1095 ks->base.port = PORT_NONE; in i40e_get_link_ksettings()
1100 ks->base.port = PORT_TP; in i40e_get_link_ksettings()
1106 ks->base.port = PORT_DA; in i40e_get_link_ksettings()
1111 ks->base.port = PORT_FIBRE; in i40e_get_link_ksettings()
1115 ks->base.port = PORT_OTHER; in i40e_get_link_ksettings()
1123 switch (hw->fc.requested_mode) { in i40e_get_link_ksettings()
1148 * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
1193 ks->link_modes.supported)) { in i40e_speed_to_link_speed()
1212 * i40e_set_link_ksettings - Set Speed and Duplex
1226 struct i40e_pf *pf = np->vsi->back; in i40e_set_link_ksettings()
1228 struct i40e_vsi *vsi = np->vsi; in i40e_set_link_ksettings()
1229 struct i40e_hw *hw = &pf->hw; in i40e_set_link_ksettings()
1240 if (hw->partition_id != 1) { in i40e_set_link_ksettings()
1242 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1244 if (vsi != pf->vsi[pf->lan_vsi]) in i40e_set_link_ksettings()
1245 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1246 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && in i40e_set_link_ksettings()
1247 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && in i40e_set_link_ksettings()
1248 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && in i40e_set_link_ksettings()
1249 hw->phy.media_type != I40E_MEDIA_TYPE_DA && in i40e_set_link_ksettings()
1250 hw->phy.link_info.link_info & I40E_AQ_LINK_UP) in i40e_set_link_ksettings()
1251 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1252 if (hw->device_id == I40E_DEV_ID_KX_B || in i40e_set_link_ksettings()
1253 hw->device_id == I40E_DEV_ID_KX_C || in i40e_set_link_ksettings()
1254 hw->device_id == I40E_DEV_ID_20G_KR2 || in i40e_set_link_ksettings()
1255 hw->device_id == I40E_DEV_ID_20G_KR2_A || in i40e_set_link_ksettings()
1256 hw->device_id == I40E_DEV_ID_25G_B || in i40e_set_link_ksettings()
1257 hw->device_id == I40E_DEV_ID_KX_X722) { in i40e_set_link_ksettings()
1259 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1282 return -EINVAL; in i40e_set_link_ksettings()
1294 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1297 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_set_link_ksettings()
1298 timeout--; in i40e_set_link_ksettings()
1300 return -EBUSY; in i40e_set_link_ksettings()
1308 err = -EAGAIN; in i40e_set_link_ksettings()
1321 if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { in i40e_set_link_ksettings()
1327 err = -EINVAL; in i40e_set_link_ksettings()
1337 if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { in i40e_set_link_ksettings()
1344 hw->phy.media_type != I40E_MEDIA_TYPE_BASET) { in i40e_set_link_ksettings()
1346 err = -EINVAL; in i40e_set_link_ksettings()
1412 err = -EOPNOTSUPP; in i40e_set_link_ksettings()
1421 err = -EOPNOTSUPP; in i40e_set_link_ksettings()
1443 hw->phy.link_info.requested_speeds = config.link_speed; in i40e_set_link_ksettings()
1447 if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { in i40e_set_link_ksettings()
1462 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_link_ksettings()
1463 err = -EAGAIN; in i40e_set_link_ksettings()
1472 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_link_ksettings()
1479 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_set_link_ksettings()
1488 struct i40e_pf *pf = np->vsi->back; in i40e_set_fec_cfg()
1489 struct i40e_hw *hw = &pf->hw; in i40e_set_fec_cfg()
1498 err = -EAGAIN; in i40e_set_fec_cfg()
1520 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_fec_cfg()
1521 err = -EAGAIN; in i40e_set_fec_cfg()
1524 i40e_set_fec_in_flags(fec_cfg, pf->flags); in i40e_set_fec_cfg()
1534 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_fec_cfg()
1546 struct i40e_pf *pf = np->vsi->back; in i40e_get_fec_param()
1547 struct i40e_hw *hw = &pf->hw; in i40e_get_fec_param()
1557 err = -EAGAIN; in i40e_get_fec_param()
1561 fecparam->fec = 0; in i40e_get_fec_param()
1564 fecparam->fec |= ETHTOOL_FEC_AUTO; in i40e_get_fec_param()
1567 fecparam->fec |= ETHTOOL_FEC_RS; in i40e_get_fec_param()
1570 fecparam->fec |= ETHTOOL_FEC_BASER; in i40e_get_fec_param()
1572 fecparam->fec |= ETHTOOL_FEC_OFF; in i40e_get_fec_param()
1574 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) in i40e_get_fec_param()
1575 fecparam->active_fec = ETHTOOL_FEC_BASER; in i40e_get_fec_param()
1576 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) in i40e_get_fec_param()
1577 fecparam->active_fec = ETHTOOL_FEC_RS; in i40e_get_fec_param()
1579 fecparam->active_fec = ETHTOOL_FEC_OFF; in i40e_get_fec_param()
1588 struct i40e_pf *pf = np->vsi->back; in i40e_set_fec_param()
1589 struct i40e_hw *hw = &pf->hw; in i40e_set_fec_param()
1592 if (hw->device_id != I40E_DEV_ID_25G_SFP28 && in i40e_set_fec_param()
1593 hw->device_id != I40E_DEV_ID_25G_B && in i40e_set_fec_param()
1594 hw->device_id != I40E_DEV_ID_KX_X722) in i40e_set_fec_param()
1595 return -EPERM; in i40e_set_fec_param()
1597 if (hw->mac.type == I40E_MAC_X722 && in i40e_set_fec_param()
1598 !test_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps)) { in i40e_set_fec_param()
1600 return -EOPNOTSUPP; in i40e_set_fec_param()
1603 switch (fecparam->fec) { in i40e_set_fec_param()
1620 dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", in i40e_set_fec_param()
1621 fecparam->fec); in i40e_set_fec_param()
1622 return -EINVAL; in i40e_set_fec_param()
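For reference, a sketch of the ethtool-to-admin-queue FEC mapping this switch performs before handing the result to i40e_set_fec_cfg(); the I40E_AQ_SET_FEC_* constant names are assumptions based on the i40e admin queue definitions, and the helper name is illustrative.

static int i40e_fec_to_aq_cfg(struct i40e_pf *pf, u32 ethtool_fec, u8 *fec_cfg)
{
        switch (ethtool_fec) {
        case ETHTOOL_FEC_AUTO:
                *fec_cfg = I40E_AQ_SET_FEC_AUTO;
                break;
        case ETHTOOL_FEC_RS:
                *fec_cfg = I40E_AQ_SET_FEC_REQUEST_RS |
                           I40E_AQ_SET_FEC_ABILITY_RS;
                break;
        case ETHTOOL_FEC_BASER:
                *fec_cfg = I40E_AQ_SET_FEC_REQUEST_KR |
                           I40E_AQ_SET_FEC_ABILITY_KR;
                break;
        case ETHTOOL_FEC_OFF:
        case ETHTOOL_FEC_NONE:
                *fec_cfg = 0;
                break;
        default:
                dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
                         ethtool_fec);
                return -EINVAL;
        }
        return 0;
}

These modes correspond to ethtool --set-fec <dev> encoding auto|rs|baser|off.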
1632 struct i40e_pf *pf = np->vsi->back; in i40e_nway_reset()
1633 struct i40e_hw *hw = &pf->hw; in i40e_nway_reset()
1634 bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; in i40e_nway_reset()
1641 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_nway_reset()
1642 return -EIO; in i40e_nway_reset()
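The elided body of i40e_nway_reset() is essentially one admin-queue call that restarts autonegotiation while preserving the current link state; this is what ethtool -r <dev> ends up invoking. A sketch of the core call plus the error path shown above:

static int i40e_restart_autoneg(struct net_device *netdev, struct i40e_hw *hw,
                                bool link_up)
{
        int ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);

        if (ret) {
                netdev_info(netdev, "link restart failed, err %d aq_err %s\n",
                            ret, i40e_aq_str(hw, hw->aq.asq_last_status));
                return -EIO;
        }
        return 0;
}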
1649 * i40e_get_pauseparam - Get Flow Control status
1653 * Return tx/rx-pause status
1659 struct i40e_pf *pf = np->vsi->back; in i40e_get_pauseparam()
1660 struct i40e_hw *hw = &pf->hw; in i40e_get_pauseparam()
1661 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_get_pauseparam()
1662 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; in i40e_get_pauseparam()
1664 pause->autoneg = in i40e_get_pauseparam()
1665 ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? in i40e_get_pauseparam()
1669 if (dcbx_cfg->pfc.pfcenable) { in i40e_get_pauseparam()
1670 pause->rx_pause = 0; in i40e_get_pauseparam()
1671 pause->tx_pause = 0; in i40e_get_pauseparam()
1675 if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { in i40e_get_pauseparam()
1676 pause->rx_pause = 1; in i40e_get_pauseparam()
1677 } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { in i40e_get_pauseparam()
1678 pause->tx_pause = 1; in i40e_get_pauseparam()
1679 } else if (hw->fc.current_mode == I40E_FC_FULL) { in i40e_get_pauseparam()
1680 pause->rx_pause = 1; in i40e_get_pauseparam()
1681 pause->tx_pause = 1; in i40e_get_pauseparam()
1686 * i40e_set_pauseparam - Set Flow Control parameter
1694 struct i40e_pf *pf = np->vsi->back; in i40e_set_pauseparam()
1695 struct i40e_vsi *vsi = np->vsi; in i40e_set_pauseparam()
1696 struct i40e_hw *hw = &pf->hw; in i40e_set_pauseparam()
1697 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_set_pauseparam()
1698 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; in i40e_set_pauseparam()
1699 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; in i40e_set_pauseparam()
1708 if (hw->partition_id != 1) { in i40e_set_pauseparam()
1710 return -EOPNOTSUPP; in i40e_set_pauseparam()
1713 if (vsi != pf->vsi[pf->lan_vsi]) in i40e_set_pauseparam()
1714 return -EOPNOTSUPP; in i40e_set_pauseparam()
1716 is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED; in i40e_set_pauseparam()
1717 if (pause->autoneg != is_an) { in i40e_set_pauseparam()
1718 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); in i40e_set_pauseparam()
1719 return -EOPNOTSUPP; in i40e_set_pauseparam()
1723 if (!test_bit(__I40E_DOWN, pf->state) && !is_an) { in i40e_set_pauseparam()
1728 if (dcbx_cfg->pfc.pfcenable) { in i40e_set_pauseparam()
1731 return -EOPNOTSUPP; in i40e_set_pauseparam()
1734 if (pause->rx_pause && pause->tx_pause) in i40e_set_pauseparam()
1735 hw->fc.requested_mode = I40E_FC_FULL; in i40e_set_pauseparam()
1736 else if (pause->rx_pause && !pause->tx_pause) in i40e_set_pauseparam()
1737 hw->fc.requested_mode = I40E_FC_RX_PAUSE; in i40e_set_pauseparam()
1738 else if (!pause->rx_pause && pause->tx_pause) in i40e_set_pauseparam()
1739 hw->fc.requested_mode = I40E_FC_TX_PAUSE; in i40e_set_pauseparam()
1740 else if (!pause->rx_pause && !pause->tx_pause) in i40e_set_pauseparam()
1741 hw->fc.requested_mode = I40E_FC_NONE; in i40e_set_pauseparam()
1743 return -EINVAL; in i40e_set_pauseparam()
1758 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_pauseparam()
1759 err = -EAGAIN; in i40e_set_pauseparam()
1764 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_pauseparam()
1765 err = -EAGAIN; in i40e_set_pauseparam()
1770 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_pauseparam()
1771 err = -EAGAIN; in i40e_set_pauseparam()
1774 if (!test_bit(__I40E_DOWN, pf->state) && is_an) { in i40e_set_pauseparam()
1776 msleep(75); in i40e_set_pauseparam()
1777 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_set_pauseparam()
1787 struct i40e_pf *pf = np->vsi->back; in i40e_get_msglevel()
1788 u32 debug_mask = pf->hw.debug_mask; in i40e_get_msglevel()
1793 return pf->msg_enable; in i40e_get_msglevel()
1799 struct i40e_pf *pf = np->vsi->back; in i40e_set_msglevel()
1802 pf->hw.debug_mask = data; in i40e_set_msglevel()
1804 pf->msg_enable = data; in i40e_set_msglevel()
1822 struct i40e_pf *pf = np->vsi->back; in i40e_get_regs()
1823 struct i40e_hw *hw = &pf->hw; in i40e_get_regs()
1828 /* Tell ethtool which driver-version-specific regs output we have. in i40e_get_regs()
1835 regs->version = 1; in i40e_get_regs()
1853 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_get_eeprom()
1854 struct i40e_pf *pf = np->vsi->back; in i40e_get_eeprom()
1862 if (eeprom->len == 0) in i40e_get_eeprom()
1863 return -EINVAL; in i40e_get_eeprom()
1866 magic = hw->vendor_id | (hw->device_id << 16); in i40e_get_eeprom()
1867 if (eeprom->magic && eeprom->magic != magic) { in i40e_get_eeprom()
1872 if ((eeprom->magic >> 16) != hw->device_id) in i40e_get_eeprom()
1873 errno = -EINVAL; in i40e_get_eeprom()
1874 else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_get_eeprom()
1875 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_get_eeprom()
1876 errno = -EBUSY; in i40e_get_eeprom()
1880 if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) in i40e_get_eeprom()
1881 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1883 ret_val, hw->aq.asq_last_status, errno, in i40e_get_eeprom()
1884 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), in i40e_get_eeprom()
1885 cmd->offset, cmd->data_size); in i40e_get_eeprom()
1891 eeprom->magic = hw->vendor_id | (hw->device_id << 16); in i40e_get_eeprom()
1893 eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); in i40e_get_eeprom()
1895 return -ENOMEM; in i40e_get_eeprom()
1899 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1901 ret_val, hw->aq.asq_last_status); in i40e_get_eeprom()
1905 sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; in i40e_get_eeprom()
1906 sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0; in i40e_get_eeprom()
1910 if (i == (sectors - 1)) { in i40e_get_eeprom()
1911 len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); in i40e_get_eeprom()
1914 offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i); in i40e_get_eeprom()
1918 if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_get_eeprom()
1919 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1924 hw->aq.asq_last_status == I40E_AQ_RC_EACCES) { in i40e_get_eeprom()
1925 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1930 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1932 offset, ret_val, hw->aq.asq_last_status); in i40e_get_eeprom()
1938 memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); in i40e_get_eeprom()
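The read loop above splits the request into I40E_NVM_SECTOR_SIZE chunks; a compact restatement of the arithmetic, where DIV_ROUND_UP is equivalent to the divide-plus-remainder test in the listing and the actual admin-queue read is stubbed out:

static void i40e_for_each_nvm_sector(struct ethtool_eeprom *eeprom)
{
        u32 sectors = DIV_ROUND_UP(eeprom->len, I40E_NVM_SECTOR_SIZE);
        u32 i, len, offset;

        for (i = 0; i < sectors; i++) {
                /* the last sector may be shorter than a full one */
                len = min_t(u32, I40E_NVM_SECTOR_SIZE,
                            eeprom->len - I40E_NVM_SECTOR_SIZE * i);
                offset = eeprom->offset + I40E_NVM_SECTOR_SIZE * i;
                /* read 'len' bytes at NVM 'offset' into the staging buffer */
        }
}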
1947 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_get_eeprom_len()
1951 if (hw->mac.type == I40E_MAC_X722) { in i40e_get_eeprom_len()
1966 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_set_eeprom()
1967 struct i40e_pf *pf = np->vsi->back; in i40e_set_eeprom()
1974 magic = hw->vendor_id | (hw->device_id << 16); in i40e_set_eeprom()
1975 if (eeprom->magic == magic) in i40e_set_eeprom()
1976 errno = -EOPNOTSUPP; in i40e_set_eeprom()
1978 else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) in i40e_set_eeprom()
1979 errno = -EINVAL; in i40e_set_eeprom()
1980 else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_set_eeprom()
1981 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_set_eeprom()
1982 errno = -EBUSY; in i40e_set_eeprom()
1986 if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) in i40e_set_eeprom()
1987 dev_info(&pf->pdev->dev, in i40e_set_eeprom()
1989 ret_val, hw->aq.asq_last_status, errno, in i40e_set_eeprom()
1990 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), in i40e_set_eeprom()
1991 cmd->offset, cmd->data_size); in i40e_set_eeprom()
2000 struct i40e_vsi *vsi = np->vsi; in i40e_get_drvinfo()
2001 struct i40e_pf *pf = vsi->back; in i40e_get_drvinfo()
2003 strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); in i40e_get_drvinfo()
2004 i40e_nvm_version_str(&pf->hw, drvinfo->fw_version, in i40e_get_drvinfo()
2005 sizeof(drvinfo->fw_version)); in i40e_get_drvinfo()
2006 strscpy(drvinfo->bus_info, pci_name(pf->pdev), in i40e_get_drvinfo()
2007 sizeof(drvinfo->bus_info)); in i40e_get_drvinfo()
2008 drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; in i40e_get_drvinfo()
2009 if (pf->hw.pf_id == 0) in i40e_get_drvinfo()
2010 drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN; in i40e_get_drvinfo()
2015 struct i40e_hw *hw = &pf->hw; in i40e_get_max_num_descriptors()
2017 switch (hw->mac.type) { in i40e_get_max_num_descriptors()
2031 struct i40e_pf *pf = np->vsi->back; in i40e_get_ringparam()
2032 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_get_ringparam()
2034 ring->rx_max_pending = i40e_get_max_num_descriptors(pf); in i40e_get_ringparam()
2035 ring->tx_max_pending = i40e_get_max_num_descriptors(pf); in i40e_get_ringparam()
2036 ring->rx_mini_max_pending = 0; in i40e_get_ringparam()
2037 ring->rx_jumbo_max_pending = 0; in i40e_get_ringparam()
2038 ring->rx_pending = vsi->rx_rings[0]->count; in i40e_get_ringparam()
2039 ring->tx_pending = vsi->tx_rings[0]->count; in i40e_get_ringparam()
2040 ring->rx_mini_pending = 0; in i40e_get_ringparam()
2041 ring->rx_jumbo_pending = 0; in i40e_get_ringparam()
2047 return index < vsi->num_queue_pairs || in i40e_active_tx_ring_index()
2048 (index >= vsi->alloc_queue_pairs && in i40e_active_tx_ring_index()
2049 index < vsi->alloc_queue_pairs + vsi->num_queue_pairs); in i40e_active_tx_ring_index()
2052 return index < vsi->num_queue_pairs; in i40e_active_tx_ring_index()
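These two return statements belong to an if/else on whether XDP is enabled: with XDP, the Tx ring array holds the regular rings followed by the XDP rings, so both halves must be range-checked. Reassembled as a sketch, using the driver's i40e_enabled_xdp_vsi() helper:

static bool i40e_tx_ring_index_active(struct i40e_vsi *vsi, u16 index)
{
        if (i40e_enabled_xdp_vsi(vsi))
                return index < vsi->num_queue_pairs ||
                       (index >= vsi->alloc_queue_pairs &&
                        index < vsi->alloc_queue_pairs +
                                vsi->num_queue_pairs);

        return index < vsi->num_queue_pairs;
}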
2063 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_set_ringparam()
2064 struct i40e_vsi *vsi = np->vsi; in i40e_set_ringparam()
2065 struct i40e_pf *pf = vsi->back; in i40e_set_ringparam()
2070 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in i40e_set_ringparam()
2071 return -EINVAL; in i40e_set_ringparam()
2074 if (ring->tx_pending > max_num_descriptors || in i40e_set_ringparam()
2075 ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || in i40e_set_ringparam()
2076 ring->rx_pending > max_num_descriptors || in i40e_set_ringparam()
2077 ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { in i40e_set_ringparam()
2079 "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", in i40e_set_ringparam()
2080 ring->tx_pending, ring->rx_pending, in i40e_set_ringparam()
2082 return -EINVAL; in i40e_set_ringparam()
2085 new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); in i40e_set_ringparam()
2086 new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); in i40e_set_ringparam()
2089 if ((new_tx_count == vsi->tx_rings[0]->count) && in i40e_set_ringparam()
2090 (new_rx_count == vsi->rx_rings[0]->count)) in i40e_set_ringparam()
2094 * disallow changing the number of descriptors -- regardless in i40e_set_ringparam()
2098 return -EBUSY; in i40e_set_ringparam()
2100 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_set_ringparam()
2101 timeout--; in i40e_set_ringparam()
2103 return -EBUSY; in i40e_set_ringparam()
2107 if (!netif_running(vsi->netdev)) { in i40e_set_ringparam()
2108 /* simple case - set for the next time the netdev is started */ in i40e_set_ringparam()
2109 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_set_ringparam()
2110 vsi->tx_rings[i]->count = new_tx_count; in i40e_set_ringparam()
2111 vsi->rx_rings[i]->count = new_rx_count; in i40e_set_ringparam()
2113 vsi->xdp_rings[i]->count = new_tx_count; in i40e_set_ringparam()
2115 vsi->num_tx_desc = new_tx_count; in i40e_set_ringparam()
2116 vsi->num_rx_desc = new_rx_count; in i40e_set_ringparam()
2121 * because the ISRs in MSI-X mode get passed pointers in i40e_set_ringparam()
2126 tx_alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_set_ringparam()
2128 if (new_tx_count != vsi->tx_rings[0]->count) { in i40e_set_ringparam()
2131 vsi->tx_rings[0]->count, new_tx_count); in i40e_set_ringparam()
2135 err = -ENOMEM; in i40e_set_ringparam()
2143 tx_rings[i] = *vsi->tx_rings[i]; in i40e_set_ringparam()
2153 i--; in i40e_set_ringparam()
2167 if (new_rx_count != vsi->rx_rings[0]->count) { in i40e_set_ringparam()
2170 vsi->rx_rings[0]->count, new_rx_count); in i40e_set_ringparam()
2171 rx_rings = kcalloc(vsi->alloc_queue_pairs, in i40e_set_ringparam()
2174 err = -ENOMEM; in i40e_set_ringparam()
2178 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_set_ringparam()
2182 rx_rings[i] = *vsi->rx_rings[i]; in i40e_set_ringparam()
2189 /* Clear cloned XDP RX-queue info before setup call */ in i40e_set_ringparam()
2194 rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS; in i40e_set_ringparam()
2208 } while (i--); in i40e_set_ringparam()
2225 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_set_ringparam()
2226 *vsi->tx_rings[i] = tx_rings[i]; in i40e_set_ringparam()
2234 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_set_ringparam()
2235 i40e_free_rx_resources(vsi->rx_rings[i]); in i40e_set_ringparam()
2237 rx_rings[i].tail = vsi->rx_rings[i]->tail; in i40e_set_ringparam()
2240 * but the recycling logic will let us re-use in i40e_set_ringparam()
2247 *vsi->rx_rings[i] = rx_rings[i]; in i40e_set_ringparam()
2253 vsi->num_tx_desc = new_tx_count; in i40e_set_ringparam()
2254 vsi->num_rx_desc = new_rx_count; in i40e_set_ringparam()
2262 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_set_ringparam()
2269 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_set_ringparam()
2275 * i40e_get_stats_count - return the stats count for a device
2282 * obtaining stats is *not* safe against changes based on non-static
2291 struct i40e_vsi *vsi = np->vsi; in i40e_get_stats_count()
2292 struct i40e_pf *pf = vsi->back; in i40e_get_stats_count()
2295 if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) in i40e_get_stats_count()
2310 * queues in pairs, we'll just use netdev->num_tx_queues * 2. This in i40e_get_stats_count()
2314 stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues; in i40e_get_stats_count()
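Pieced together, the count reported to ethtool is: VSI stats, plus per-queue stats for both Tx and Rx of every queue pair, plus PF-wide stats only for the main LAN VSI on partition 1. A sketch of that computation; macro names other than I40E_QUEUE_STATS_LEN are assumptions.

static int i40e_stats_count_sketch(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int stats_len = I40E_VSI_STATS_LEN;

        if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
                stats_len += I40E_PF_STATS_LEN;

        /* queues come in pairs, so count Tx and Rx stats for each */
        stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues;

        return stats_len;
}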
2322 struct i40e_vsi *vsi = np->vsi; in i40e_get_sset_count()
2323 struct i40e_pf *pf = vsi->back; in i40e_get_sset_count()
2332 (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0); in i40e_get_sset_count()
2334 return -EOPNOTSUPP; in i40e_get_sset_count()
2339 * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
2340 * @tc: the TC statistics in VEB structure (veb->tc_stats)
2341 * @i: the index of traffic class in (veb->tc_stats) structure to copy
2343 * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
2352 .tc_rx_packets = tc->tc_rx_packets[i], in i40e_get_veb_tc_stats()
2353 .tc_rx_bytes = tc->tc_rx_bytes[i], in i40e_get_veb_tc_stats()
2354 .tc_tx_packets = tc->tc_tx_packets[i], in i40e_get_veb_tc_stats()
2355 .tc_tx_bytes = tc->tc_tx_bytes[i], in i40e_get_veb_tc_stats()
2362 * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
2366 * The PFC stats are found as arrays in pf->stats, which is not easy to pass
2374 .stat = pf->stats.stat[priority] in i40e_get_pfc_stats()
2387 * i40e_get_ethtool_stats - copy stat values into supplied buffer
2393 * pre-allocated to the size returned by i40e_get_stats_count(). Note that all
2404 struct i40e_vsi *vsi = np->vsi; in i40e_get_ethtool_stats()
2405 struct i40e_pf *pf = vsi->back; in i40e_get_ethtool_stats()
2419 for (i = 0; i < netdev->num_tx_queues; i++) { in i40e_get_ethtool_stats()
2420 i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i])); in i40e_get_ethtool_stats()
2421 i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i])); in i40e_get_ethtool_stats()
2425 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) in i40e_get_ethtool_stats()
2428 veb_stats = ((pf->lan_veb != I40E_NO_VEB) && in i40e_get_ethtool_stats()
2429 (pf->lan_veb < I40E_MAX_VEB) && in i40e_get_ethtool_stats()
2430 test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)); in i40e_get_ethtool_stats()
2433 veb = pf->veb[pf->lan_veb]; in i40e_get_ethtool_stats()
2447 i40e_get_veb_tc_stats(&veb->tc_stats, i); in i40e_get_ethtool_stats()
2465 WARN_ONCE(data - p != i40e_get_stats_count(netdev), in i40e_get_ethtool_stats()
2470 * i40e_get_stat_strings - copy stat strings into supplied buffer
2475 * pre-allocated with the size reported by i40e_get_stats_count. Note that the
2482 struct i40e_vsi *vsi = np->vsi; in i40e_get_stat_strings()
2483 struct i40e_pf *pf = vsi->back; in i40e_get_stat_strings()
2491 for (i = 0; i < netdev->num_tx_queues; i++) { in i40e_get_stat_strings()
2498 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) in i40e_get_stat_strings()
2512 WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, in i40e_get_stat_strings()
2519 struct i40e_vsi *vsi = np->vsi; in i40e_get_priv_flag_strings()
2520 struct i40e_pf *pf = vsi->back; in i40e_get_priv_flag_strings()
2526 if (pf->hw.pf_id != 0) in i40e_get_priv_flag_strings()
2557 if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_get_ts_info()
2560 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | in i40e_get_ts_info()
2567 if (pf->ptp_clock) in i40e_get_ts_info()
2568 info->phc_index = ptp_clock_index(pf->ptp_clock); in i40e_get_ts_info()
2570 info->phc_index = -1; in i40e_get_ts_info()
2572 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); in i40e_get_ts_info()
2574 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | in i40e_get_ts_info()
2579 if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) in i40e_get_ts_info()
2580 info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | in i40e_get_ts_info()
2595 struct i40e_pf *pf = np->vsi->back; in i40e_link_test()
2600 status = i40e_get_link_status(&pf->hw, &link_up); in i40e_link_test()
2618 struct i40e_pf *pf = np->vsi->back; in i40e_reg_test()
2621 *data = i40e_diag_reg_test(&pf->hw); in i40e_reg_test()
2629 struct i40e_pf *pf = np->vsi->back; in i40e_eeprom_test()
2632 *data = i40e_diag_eeprom_test(&pf->hw); in i40e_eeprom_test()
2635 pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; in i40e_eeprom_test()
2643 struct i40e_pf *pf = np->vsi->back; in i40e_intr_test()
2644 u16 swc_old = pf->sw_int_count; in i40e_intr_test()
2647 wr32(&pf->hw, I40E_PFINT_DYN_CTL0, in i40e_intr_test()
2654 *data = (swc_old == pf->sw_int_count); in i40e_intr_test()
2661 struct i40e_vf *vfs = pf->vf; in i40e_active_vfs()
2664 for (i = 0; i < pf->num_alloc_vfs; i++) in i40e_active_vfs()
2680 struct i40e_pf *pf = np->vsi->back; in i40e_diag_test()
2682 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { in i40e_diag_test()
2686 set_bit(__I40E_TESTING, pf->state); in i40e_diag_test()
2688 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_diag_test()
2689 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { in i40e_diag_test()
2690 dev_warn(&pf->pdev->dev, in i40e_diag_test()
2696 dev_warn(&pf->pdev->dev, in i40e_diag_test()
2706 /* This reset does not affect link - if it is in i40e_diag_test()
2714 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2717 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2720 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2724 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2726 clear_bit(__I40E_TESTING, pf->state); in i40e_diag_test()
2736 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2752 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2753 clear_bit(__I40E_TESTING, pf->state); in i40e_diag_test()
2761 struct i40e_pf *pf = np->vsi->back; in i40e_get_wol()
2762 struct i40e_hw *hw = &pf->hw; in i40e_get_wol()
2767 if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) { in i40e_get_wol()
2768 wol->supported = 0; in i40e_get_wol()
2769 wol->wolopts = 0; in i40e_get_wol()
2771 wol->supported = WAKE_MAGIC; in i40e_get_wol()
2772 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); in i40e_get_wol()
2777 * i40e_set_wol - set the WakeOnLAN configuration
2784 struct i40e_pf *pf = np->vsi->back; in i40e_set_wol()
2785 struct i40e_vsi *vsi = np->vsi; in i40e_set_wol()
2786 struct i40e_hw *hw = &pf->hw; in i40e_set_wol()
2790 if (hw->partition_id != 1) { in i40e_set_wol()
2792 return -EOPNOTSUPP; in i40e_set_wol()
2795 if (vsi != pf->vsi[pf->lan_vsi]) in i40e_set_wol()
2796 return -EOPNOTSUPP; in i40e_set_wol()
2800 if (BIT(hw->port) & wol_nvm_bits) in i40e_set_wol()
2801 return -EOPNOTSUPP; in i40e_set_wol()
2804 if (wol->wolopts & ~WAKE_MAGIC) in i40e_set_wol()
2805 return -EOPNOTSUPP; in i40e_set_wol()
2808 if (pf->wol_en != !!wol->wolopts) { in i40e_set_wol()
2809 pf->wol_en = !!wol->wolopts; in i40e_set_wol()
2810 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_set_wol()
2820 struct i40e_pf *pf = np->vsi->back; in i40e_set_phys_id()
2821 struct i40e_hw *hw = &pf->hw; in i40e_set_phys_id()
2828 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { in i40e_set_phys_id()
2829 pf->led_status = i40e_led_get(hw); in i40e_set_phys_id()
2831 if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) in i40e_set_phys_id()
2835 &pf->phy_led_val); in i40e_set_phys_id()
2836 pf->led_status = temp_status; in i40e_set_phys_id()
2840 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) in i40e_set_phys_id()
2843 ret = i40e_led_set_phy(hw, true, pf->led_status, 0); in i40e_set_phys_id()
2846 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) in i40e_set_phys_id()
2849 ret = i40e_led_set_phy(hw, false, pf->led_status, 0); in i40e_set_phys_id()
2852 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { in i40e_set_phys_id()
2853 i40e_led_set(hw, pf->led_status, false); in i40e_set_phys_id()
2855 ret = i40e_led_set_phy(hw, false, pf->led_status, in i40e_set_phys_id()
2856 (pf->phy_led_val | in i40e_set_phys_id()
2858 if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) in i40e_set_phys_id()
2866 return -ENOENT; in i40e_set_phys_id()
2877 * __i40e_get_coalesce - get per-queue coalesce settings
2879 * @ec: ethtool coalesce data structure
2882 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
2887 struct ethtool_coalesce *ec, in __i40e_get_coalesce() argument
2892 struct i40e_vsi *vsi = np->vsi; in __i40e_get_coalesce()
2894 ec->tx_max_coalesced_frames_irq = vsi->work_limit; in __i40e_get_coalesce()
2901 else if (queue >= vsi->num_queue_pairs) in __i40e_get_coalesce()
2902 return -EINVAL; in __i40e_get_coalesce()
2904 rx_ring = vsi->rx_rings[queue]; in __i40e_get_coalesce()
2905 tx_ring = vsi->tx_rings[queue]; in __i40e_get_coalesce()
2907 if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) in __i40e_get_coalesce()
2908 ec->use_adaptive_rx_coalesce = 1; in __i40e_get_coalesce()
2910 if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) in __i40e_get_coalesce()
2911 ec->use_adaptive_tx_coalesce = 1; in __i40e_get_coalesce()
2913 ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; in __i40e_get_coalesce()
2914 ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; in __i40e_get_coalesce()
2922 ec->rx_coalesce_usecs_high = vsi->int_rate_limit; in __i40e_get_coalesce()
2923 ec->tx_coalesce_usecs_high = vsi->int_rate_limit; in __i40e_get_coalesce()
2929 * i40e_get_coalesce - get a netdev's coalesce settings
2931 * @ec: ethtool coalesce data structure
2936 * modified per-queue settings, this only guarantees to represent queue 0. See
2940 struct ethtool_coalesce *ec, in i40e_get_coalesce() argument
2944 return __i40e_get_coalesce(netdev, ec, -1); in i40e_get_coalesce()
2948 * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
2950 * @ec: ethtool's coalesce settings
2956 struct ethtool_coalesce *ec) in i40e_get_per_queue_coalesce() argument
2958 return __i40e_get_coalesce(netdev, ec, queue); in i40e_get_per_queue_coalesce()
2962 * i40e_set_itr_per_queue - set ITR values for specific queue
2964 * @ec: coalesce settings from ethtool
2970 struct ethtool_coalesce *ec, in i40e_set_itr_per_queue() argument
2973 struct i40e_ring *rx_ring = vsi->rx_rings[queue]; in i40e_set_itr_per_queue()
2974 struct i40e_ring *tx_ring = vsi->tx_rings[queue]; in i40e_set_itr_per_queue()
2975 struct i40e_pf *pf = vsi->back; in i40e_set_itr_per_queue()
2976 struct i40e_hw *hw = &pf->hw; in i40e_set_itr_per_queue()
2980 intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); in i40e_set_itr_per_queue()
2982 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); in i40e_set_itr_per_queue()
2983 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); in i40e_set_itr_per_queue()
2985 if (ec->use_adaptive_rx_coalesce) in i40e_set_itr_per_queue()
2986 rx_ring->itr_setting |= I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2988 rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2990 if (ec->use_adaptive_tx_coalesce) in i40e_set_itr_per_queue()
2991 tx_ring->itr_setting |= I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2993 tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2995 q_vector = rx_ring->q_vector; in i40e_set_itr_per_queue()
2996 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); in i40e_set_itr_per_queue()
2998 q_vector = tx_ring->q_vector; in i40e_set_itr_per_queue()
2999 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); in i40e_set_itr_per_queue()
3006 wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl); in i40e_set_itr_per_queue()
3011 * __i40e_set_coalesce - set coalesce settings for particular queue
3013 * @ec: ethtool coalesce settings
3019 struct ethtool_coalesce *ec, in __i40e_set_coalesce() argument
3024 struct i40e_vsi *vsi = np->vsi; in __i40e_set_coalesce()
3025 struct i40e_pf *pf = vsi->back; in __i40e_set_coalesce()
3028 if (ec->tx_max_coalesced_frames_irq) in __i40e_set_coalesce()
3029 vsi->work_limit = ec->tx_max_coalesced_frames_irq; in __i40e_set_coalesce()
3032 cur_rx_itr = vsi->rx_rings[0]->itr_setting; in __i40e_set_coalesce()
3033 cur_tx_itr = vsi->tx_rings[0]->itr_setting; in __i40e_set_coalesce()
3034 } else if (queue < vsi->num_queue_pairs) { in __i40e_set_coalesce()
3035 cur_rx_itr = vsi->rx_rings[queue]->itr_setting; in __i40e_set_coalesce()
3036 cur_tx_itr = vsi->tx_rings[queue]->itr_setting; in __i40e_set_coalesce()
3038 netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", in __i40e_set_coalesce()
3039 vsi->num_queue_pairs - 1); in __i40e_set_coalesce()
3040 return -EINVAL; in __i40e_set_coalesce()
3046 /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ in __i40e_set_coalesce()
3047 if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { in __i40e_set_coalesce()
3048 netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); in __i40e_set_coalesce()
3049 return -EINVAL; in __i40e_set_coalesce()
3052 if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { in __i40e_set_coalesce()
3053 netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n", in __i40e_set_coalesce()
3055 return -EINVAL; in __i40e_set_coalesce()
3058 if (ec->rx_coalesce_usecs != cur_rx_itr && in __i40e_set_coalesce()
3059 ec->use_adaptive_rx_coalesce) { in __i40e_set_coalesce()
3060 netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n"); in __i40e_set_coalesce()
3061 return -EINVAL; in __i40e_set_coalesce()
3064 if (ec->rx_coalesce_usecs > I40E_MAX_ITR) { in __i40e_set_coalesce()
3065 netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); in __i40e_set_coalesce()
3066 return -EINVAL; in __i40e_set_coalesce()
3069 if (ec->tx_coalesce_usecs != cur_tx_itr && in __i40e_set_coalesce()
3070 ec->use_adaptive_tx_coalesce) { in __i40e_set_coalesce()
3071 netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n"); in __i40e_set_coalesce()
3072 return -EINVAL; in __i40e_set_coalesce()
3075 if (ec->tx_coalesce_usecs > I40E_MAX_ITR) { in __i40e_set_coalesce()
3076 netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); in __i40e_set_coalesce()
3077 return -EINVAL; in __i40e_set_coalesce()
3080 if (ec->use_adaptive_rx_coalesce && !cur_rx_itr) in __i40e_set_coalesce()
3081 ec->rx_coalesce_usecs = I40E_MIN_ITR; in __i40e_set_coalesce()
3083 if (ec->use_adaptive_tx_coalesce && !cur_tx_itr) in __i40e_set_coalesce()
3084 ec->tx_coalesce_usecs = I40E_MIN_ITR; in __i40e_set_coalesce()
3086 intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); in __i40e_set_coalesce()
3087 vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); in __i40e_set_coalesce()
3088 if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { in __i40e_set_coalesce()
3090 vsi->int_rate_limit); in __i40e_set_coalesce()
3097 for (i = 0; i < vsi->num_queue_pairs; i++) in __i40e_set_coalesce()
3098 i40e_set_itr_per_queue(vsi, ec, i); in __i40e_set_coalesce()
3100 i40e_set_itr_per_queue(vsi, ec, queue); in __i40e_set_coalesce()
3107 * i40e_set_coalesce - set coalesce settings for every queue on the netdev
3109 * @ec: ethtool coalesce settings
3116 struct ethtool_coalesce *ec, in i40e_set_coalesce() argument
3120 return __i40e_set_coalesce(netdev, ec, -1); in i40e_set_coalesce()
3124 * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
3126 * @ec: ethtool's coalesce settings
3132 struct ethtool_coalesce *ec) in i40e_set_per_queue_coalesce() argument
3134 return __i40e_set_coalesce(netdev, ec, queue); in i40e_set_per_queue_coalesce()
3138 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
3146 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_hash_opts()
3150 cmd->data = 0; in i40e_get_rss_hash_opts()
3152 switch (cmd->flow_type) { in i40e_get_rss_hash_opts()
3176 cmd->data |= RXH_IP_SRC | RXH_IP_DST; in i40e_get_rss_hash_opts()
3179 return -EINVAL; in i40e_get_rss_hash_opts()
3193 cmd->data |= RXH_L4_B_0_1; in i40e_get_rss_hash_opts()
3195 cmd->data |= RXH_L4_B_2_3; in i40e_get_rss_hash_opts()
3197 if (cmd->flow_type == TCP_V4_FLOW || in i40e_get_rss_hash_opts()
3198 cmd->flow_type == UDP_V4_FLOW) { in i40e_get_rss_hash_opts()
3199 if (hw->mac.type == I40E_MAC_X722) { in i40e_get_rss_hash_opts()
3201 cmd->data |= RXH_IP_SRC; in i40e_get_rss_hash_opts()
3203 cmd->data |= RXH_IP_DST; in i40e_get_rss_hash_opts()
3206 cmd->data |= RXH_IP_SRC; in i40e_get_rss_hash_opts()
3208 cmd->data |= RXH_IP_DST; in i40e_get_rss_hash_opts()
3210 } else if (cmd->flow_type == TCP_V6_FLOW || in i40e_get_rss_hash_opts()
3211 cmd->flow_type == UDP_V6_FLOW) { in i40e_get_rss_hash_opts()
3213 cmd->data |= RXH_IP_SRC; in i40e_get_rss_hash_opts()
3215 cmd->data |= RXH_IP_DST; in i40e_get_rss_hash_opts()
3223 * i40e_check_mask - Check whether a mask field is set
3239 return -1; in i40e_check_mask()
3243 * i40e_parse_rx_flow_user_data - Deconstruct user-defined data
3247 * Read the user-defined data and deconstruct the value into a structure. No
3248 * other code should read the user-defined data, so as to ensure that every
3251 * The user-defined field is a 64bit Big Endian format value, which we
3256 * Returns 0 if the data is valid, and non-zero if the userdef data is invalid
3270 if (!(fsp->flow_type & FLOW_EXT)) in i40e_parse_rx_flow_user_data()
3273 value = be64_to_cpu(*((__be64 *)fsp->h_ext.data)); in i40e_parse_rx_flow_user_data()
3274 mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data)); in i40e_parse_rx_flow_user_data()
3282 return -EINVAL; in i40e_parse_rx_flow_user_data()
3284 data->flex_word = value & I40E_USERDEF_FLEX_WORD; in i40e_parse_rx_flow_user_data()
3285 data->flex_offset = in i40e_parse_rx_flow_user_data()
3287 data->flex_filter = true; in i40e_parse_rx_flow_user_data()
3294 * i40e_fill_rx_flow_user_data - Fill in user-defined data field
3306 if (data->flex_filter) { in i40e_fill_rx_flow_user_data()
3307 value |= data->flex_word; in i40e_fill_rx_flow_user_data()
3308 value |= (u64)data->flex_offset << 16; in i40e_fill_rx_flow_user_data()
3313 fsp->flow_type |= FLOW_EXT; in i40e_fill_rx_flow_user_data()
3315 *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value); in i40e_fill_rx_flow_user_data()
3316 *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask); in i40e_fill_rx_flow_user_data()
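Taken together, the parse/fill pair implies this layout for the 64-bit big-endian user-def field: the flex match word lives in bits 15:0 and its packet offset in bits 31:16. A sketch follows; only I40E_USERDEF_FLEX_WORD appears in the listing, the other mask names and the helper are assumptions.

#define I40E_USERDEF_FLEX_WORD          GENMASK_ULL(15, 0)   /* match word  */
#define I40E_USERDEF_FLEX_OFFSET        GENMASK_ULL(31, 16)  /* byte offset */
#define I40E_USERDEF_FLEX_FILTER        GENMASK_ULL(31, 0)   /* both fields */

static __be64 i40e_pack_flex_userdef(u16 flex_word, u16 flex_offset)
{
        u64 value = flex_word | ((u64)flex_offset << 16);

        return cpu_to_be64(value);  /* stored via *(__be64 *)fsp->h_ext.data */
}

From user space the same value is supplied with ethtool -N <dev> flow-type ... user-def <value>.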
3320 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
3328 * Returns 0 on success or -EMSGSIZE if entry not found
3339 cmd->data = i40e_get_fd_cnt_all(pf); in i40e_get_ethtool_fdir_all()
3342 &pf->fdir_filter_list, fdir_node) { in i40e_get_ethtool_fdir_all()
3343 if (cnt == cmd->rule_cnt) in i40e_get_ethtool_fdir_all()
3344 return -EMSGSIZE; in i40e_get_ethtool_fdir_all()
3346 rule_locs[cnt] = rule->fd_id; in i40e_get_ethtool_fdir_all()
3350 cmd->rule_cnt = cnt; in i40e_get_ethtool_fdir_all()
3356 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
3363 * Returns 0 on success or -EINVAL if filter not found
3369 (struct ethtool_rx_flow_spec *)&cmd->fs; in i40e_get_ethtool_fdir_entry()
3377 &pf->fdir_filter_list, fdir_node) { in i40e_get_ethtool_fdir_entry()
3378 if (fsp->location <= rule->fd_id) in i40e_get_ethtool_fdir_entry()
3382 if (!rule || fsp->location != rule->fd_id) in i40e_get_ethtool_fdir_entry()
3383 return -EINVAL; in i40e_get_ethtool_fdir_entry()
3385 fsp->flow_type = rule->flow_type; in i40e_get_ethtool_fdir_entry()
3386 if (fsp->flow_type == IP_USER_FLOW) { in i40e_get_ethtool_fdir_entry()
3387 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; in i40e_get_ethtool_fdir_entry()
3388 fsp->h_u.usr_ip4_spec.proto = 0; in i40e_get_ethtool_fdir_entry()
3389 fsp->m_u.usr_ip4_spec.proto = 0; in i40e_get_ethtool_fdir_entry()
3392 if (fsp->flow_type == IPV6_USER_FLOW || in i40e_get_ethtool_fdir_entry()
3393 fsp->flow_type == UDP_V6_FLOW || in i40e_get_ethtool_fdir_entry()
3394 fsp->flow_type == TCP_V6_FLOW || in i40e_get_ethtool_fdir_entry()
3395 fsp->flow_type == SCTP_V6_FLOW) { in i40e_get_ethtool_fdir_entry()
3400 fsp->h_u.tcp_ip6_spec.psrc = rule->dst_port; in i40e_get_ethtool_fdir_entry()
3401 fsp->h_u.tcp_ip6_spec.pdst = rule->src_port; in i40e_get_ethtool_fdir_entry()
3402 memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->src_ip6, in i40e_get_ethtool_fdir_entry()
3404 memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->dst_ip6, in i40e_get_ethtool_fdir_entry()
3411 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; in i40e_get_ethtool_fdir_entry()
3412 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; in i40e_get_ethtool_fdir_entry()
3413 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip; in i40e_get_ethtool_fdir_entry()
3414 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip; in i40e_get_ethtool_fdir_entry()
3417 switch (rule->flow_type) { in i40e_get_ethtool_fdir_entry()
3449 rule->flow_type); in i40e_get_ethtool_fdir_entry()
3458 fsp->m_u.tcp_ip6_spec.ip6src[0] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3459 fsp->m_u.tcp_ip6_spec.ip6src[1] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3460 fsp->m_u.tcp_ip6_spec.ip6src[2] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3461 fsp->m_u.tcp_ip6_spec.ip6src[3] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3465 fsp->m_u.tcp_ip6_spec.ip6dst[0] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3466 fsp->m_u.tcp_ip6_spec.ip6dst[1] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3467 fsp->m_u.tcp_ip6_spec.ip6dst[2] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3468 fsp->m_u.tcp_ip6_spec.ip6dst[3] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3472 fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3475 fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3478 fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3481 fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3483 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) in i40e_get_ethtool_fdir_entry()
3484 fsp->ring_cookie = RX_CLS_FLOW_DISC; in i40e_get_ethtool_fdir_entry()
3486 fsp->ring_cookie = rule->q_index; in i40e_get_ethtool_fdir_entry()
3488 if (rule->vlan_tag) { in i40e_get_ethtool_fdir_entry()
3489 fsp->h_ext.vlan_etype = rule->vlan_etype; in i40e_get_ethtool_fdir_entry()
3490 fsp->m_ext.vlan_etype = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3491 fsp->h_ext.vlan_tci = rule->vlan_tag; in i40e_get_ethtool_fdir_entry()
3492 fsp->m_ext.vlan_tci = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3493 fsp->flow_type |= FLOW_EXT; in i40e_get_ethtool_fdir_entry()
3496 if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { in i40e_get_ethtool_fdir_entry()
3499 vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); in i40e_get_ethtool_fdir_entry()
3500 if (vsi && vsi->type == I40E_VSI_SRIOV) { in i40e_get_ethtool_fdir_entry()
3501 /* VFs are zero-indexed by the driver, but ethtool in i40e_get_ethtool_fdir_entry()
3502 * expects them to be one-indexed, so add one here in i40e_get_ethtool_fdir_entry()
3504 u64 ring_vf = vsi->vf_id + 1; in i40e_get_ethtool_fdir_entry()
3507 fsp->ring_cookie |= ring_vf; in i40e_get_ethtool_fdir_entry()
3511 if (rule->flex_filter) { in i40e_get_ethtool_fdir_entry()
3513 userdef.flex_word = be16_to_cpu(rule->flex_word); in i40e_get_ethtool_fdir_entry()
3514 userdef.flex_offset = rule->flex_offset; in i40e_get_ethtool_fdir_entry()
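The VF encoding noted above packs the queue index into the low 32 bits of ring_cookie and the one-based VF number into the field above it, using the UAPI shift from linux/ethtool.h. A minimal sketch:

static u64 i40e_encode_ring_cookie(u32 queue, u32 vf_id /* zero-based */)
{
        u64 ring_vf = vf_id + 1;        /* ethtool expects one-based VFs */

        return queue | (ring_vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF);
}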
3523 * i40e_get_rxnfc - command to get RX flow classification rules
3534 struct i40e_vsi *vsi = np->vsi; in i40e_get_rxnfc()
3535 struct i40e_pf *pf = vsi->back; in i40e_get_rxnfc()
3536 int ret = -EOPNOTSUPP; in i40e_get_rxnfc()
3538 switch (cmd->cmd) { in i40e_get_rxnfc()
3540 cmd->data = vsi->rss_size; in i40e_get_rxnfc()
3547 cmd->rule_cnt = pf->fdir_pf_active_filters; in i40e_get_rxnfc()
3549 cmd->data = i40e_get_fd_cnt_all(pf); in i40e_get_rxnfc()
3566 * i40e_get_rss_hash_bits - Read RSS Hash bits from register
3580 if (nfc->data & RXH_L4_B_0_1) in i40e_get_rss_hash_bits()
3584 if (nfc->data & RXH_L4_B_2_3) in i40e_get_rss_hash_bits()
3589 if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) { in i40e_get_rss_hash_bits()
3592 } else if (nfc->flow_type == TCP_V4_FLOW || in i40e_get_rss_hash_bits()
3593 nfc->flow_type == UDP_V4_FLOW) { in i40e_get_rss_hash_bits()
3594 if (hw->mac.type == I40E_MAC_X722) { in i40e_get_rss_hash_bits()
3606 if (nfc->data & RXH_IP_SRC) in i40e_get_rss_hash_bits()
3610 if (nfc->data & RXH_IP_DST) in i40e_get_rss_hash_bits()
3620 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
3628 struct i40e_hw *hw = &pf->hw; in i40e_set_rss_hash_opt()
3636 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { in i40e_set_rss_hash_opt()
3637 dev_err(&pf->pdev->dev, in i40e_set_rss_hash_opt()
3639 return -EOPNOTSUPP; in i40e_set_rss_hash_opt()
3645 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | in i40e_set_rss_hash_opt()
3647 return -EINVAL; in i40e_set_rss_hash_opt()
3649 switch (nfc->flow_type) { in i40e_set_rss_hash_opt()
3653 pf->hw.caps)) in i40e_set_rss_hash_opt()
3660 pf->hw.caps)) in i40e_set_rss_hash_opt()
3667 pf->hw.caps)) { in i40e_set_rss_hash_opt()
3678 pf->hw.caps)) { in i40e_set_rss_hash_opt()
3690 if ((nfc->data & RXH_L4_B_0_1) || in i40e_set_rss_hash_opt()
3691 (nfc->data & RXH_L4_B_2_3)) in i40e_set_rss_hash_opt()
3692 return -EINVAL; in i40e_set_rss_hash_opt()
3699 if ((nfc->data & RXH_L4_B_0_1) || in i40e_set_rss_hash_opt()
3700 (nfc->data & RXH_L4_B_2_3)) in i40e_set_rss_hash_opt()
3701 return -EINVAL; in i40e_set_rss_hash_opt()
3713 return -EINVAL; in i40e_set_rss_hash_opt()
3722 i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc); in i40e_set_rss_hash_opt()
3740 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
3757 struct i40e_pf *pf = vsi->back; in i40e_update_ethtool_fdir_entry()
3759 int err = -EINVAL; in i40e_update_ethtool_fdir_entry()
3765 &pf->fdir_filter_list, fdir_node) { in i40e_update_ethtool_fdir_entry()
3767 if (rule->fd_id >= sw_idx) in i40e_update_ethtool_fdir_entry()
3773 if (rule && (rule->fd_id == sw_idx)) { in i40e_update_ethtool_fdir_entry()
3778 hlist_del(&rule->fdir_node); in i40e_update_ethtool_fdir_entry()
3780 pf->fdir_pf_active_filters--; in i40e_update_ethtool_fdir_entry()
3790 INIT_HLIST_NODE(&input->fdir_node); in i40e_update_ethtool_fdir_entry()
3794 hlist_add_behind(&input->fdir_node, &parent->fdir_node); in i40e_update_ethtool_fdir_entry()
3796 hlist_add_head(&input->fdir_node, in i40e_update_ethtool_fdir_entry()
3797 &pf->fdir_filter_list); in i40e_update_ethtool_fdir_entry()
3800 pf->fdir_pf_active_filters++; in i40e_update_ethtool_fdir_entry()
3806 * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table
3819 list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) { in i40e_prune_flex_pit_list()
3822 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { in i40e_prune_flex_pit_list()
3823 if (rule->flow_type != IP_USER_FLOW) in i40e_prune_flex_pit_list()
3825 if (rule->flex_filter && in i40e_prune_flex_pit_list()
3826 rule->flex_offset == entry->src_offset) { in i40e_prune_flex_pit_list()
3836 list_del(&entry->list); in i40e_prune_flex_pit_list()
3842 list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) { in i40e_prune_flex_pit_list()
3845 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { in i40e_prune_flex_pit_list()
3849 if (rule->flow_type == IP_USER_FLOW) in i40e_prune_flex_pit_list()
3851 if (rule->flex_filter && in i40e_prune_flex_pit_list()
3852 rule->flex_offset == entry->src_offset) { in i40e_prune_flex_pit_list()
3862 list_del(&entry->list); in i40e_prune_flex_pit_list()
3869 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
3882 (struct ethtool_rx_flow_spec *)&cmd->fs; in i40e_del_fdir_entry()
3883 struct i40e_pf *pf = vsi->back; in i40e_del_fdir_entry()
3886 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_del_fdir_entry()
3887 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_del_fdir_entry()
3888 return -EBUSY; in i40e_del_fdir_entry()
3890 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_del_fdir_entry()
3891 return -EBUSY; in i40e_del_fdir_entry()
3893 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); in i40e_del_fdir_entry()
3902 * i40e_unused_pit_index - Find an unused PIT index for given list
3920 list_for_each_entry(entry, &pf->l4_flex_pit_list, list) in i40e_unused_pit_index()
3921 clear_bit(entry->pit_index, &available_index); in i40e_unused_pit_index()
3923 list_for_each_entry(entry, &pf->l3_flex_pit_list, list) in i40e_unused_pit_index()
3924 clear_bit(entry->pit_index, &available_index); in i40e_unused_pit_index()
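Both lists are walked so that an index already claimed by either the L3 or the L4 flex table is never reused; the lowest surviving bit becomes the new PIT index. A sketch assuming eight candidate indices (the 0xFF starting mask and the entry type name are assumptions):

static u8 i40e_pick_unused_pit_index(struct i40e_pf *pf)
{
        unsigned long available_index = 0xFF;   /* indices 0..7 start free */
        struct i40e_flex_pit *entry;

        list_for_each_entry(entry, &pf->l4_flex_pit_list, list)
                clear_bit(entry->pit_index, &available_index);

        list_for_each_entry(entry, &pf->l3_flex_pit_list, list)
                clear_bit(entry->pit_index, &available_index);

        return find_first_bit(&available_index, 8);
}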
3930 * i40e_find_flex_offset - Find an existing flex src_offset
3946 * already programmed, we can simply re-use it. in i40e_find_flex_offset()
3950 if (entry->src_offset == src_offset) in i40e_find_flex_offset()
3960 return ERR_PTR(-ENOSPC); in i40e_find_flex_offset()
3966 * i40e_add_flex_offset - Add src_offset to flex PIT table list
3986 return -ENOMEM; in i40e_add_flex_offset()
3988 new_pit->src_offset = src_offset; in i40e_add_flex_offset()
3989 new_pit->pit_index = pit_index; in i40e_add_flex_offset()
3995 if (new_pit->src_offset < entry->src_offset) { in i40e_add_flex_offset()
3996 list_add_tail(&new_pit->list, &entry->list); in i40e_add_flex_offset()
4004 if (new_pit->src_offset == entry->src_offset) { in i40e_add_flex_offset()
4007 /* If the PIT index is not the same we can't re-use in i40e_add_flex_offset()
4010 if (new_pit->pit_index != entry->pit_index) in i40e_add_flex_offset()
4011 err = -EINVAL; in i40e_add_flex_offset()
4021 list_add_tail(&new_pit->list, flex_pit_list); in i40e_add_flex_offset()
4026 * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
4039 * This function will reprogram the FLX_PIT register from a book-keeping
4073 u16 offset = entry->src_offset + j; in __i40e_reprogram_flex_pit()
4077 offset - 3); in __i40e_reprogram_flex_pit()
4080 i40e_write_rx_ctl(&pf->hw, in __i40e_reprogram_flex_pit()
4088 i40e_write_rx_ctl(&pf->hw, in __i40e_reprogram_flex_pit()
4090 I40E_FLEX_PREP_VAL(entry->pit_index + 50, in __i40e_reprogram_flex_pit()
4092 entry->src_offset)); in __i40e_reprogram_flex_pit()
4103 last_offset = list_prev_entry(entry, list)->src_offset + 1; in __i40e_reprogram_flex_pit()
4106 i40e_write_rx_ctl(&pf->hw, in __i40e_reprogram_flex_pit()
4115 * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change
4123 __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list, in i40e_reprogram_flex_pit()
4126 __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list, in i40e_reprogram_flex_pit()
4130 i40e_write_rx_ctl(&pf->hw, in i40e_reprogram_flex_pit()
4135 i40e_write_rx_ctl(&pf->hw, in i40e_reprogram_flex_pit()
4142 * i40e_flow_str - Converts a flow_type into a human readable string
4150 switch (fsp->flow_type & ~FLOW_EXT) { in i40e_flow_str()
4173 * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index
4204 * i40e_print_input_set - Show changes between two input sets
4215 struct i40e_pf *pf = vsi->back; in i40e_print_input_set()
4222 netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n", in i40e_print_input_set()
4229 netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n", in i40e_print_input_set()
4236 netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n", in i40e_print_input_set()
4243 netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n", in i40e_print_input_set()
4250 netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n", in i40e_print_input_set()
4261 netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n", in i40e_print_input_set()
4267 netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n", in i40e_print_input_set()
4269 netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n", in i40e_print_input_set()
4274 * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid
4309 struct i40e_pf *pf = vsi->back; in i40e_check_fdir_input_set()
4318 switch (fsp->flow_type & ~FLOW_EXT) { in i40e_check_fdir_input_set()
4321 fdir_filter_count = &pf->fd_sctp4_filter_cnt; in i40e_check_fdir_input_set()
4325 fdir_filter_count = &pf->fd_tcp4_filter_cnt; in i40e_check_fdir_input_set()
4329 fdir_filter_count = &pf->fd_udp4_filter_cnt; in i40e_check_fdir_input_set()
4333 fdir_filter_count = &pf->fd_sctp6_filter_cnt; in i40e_check_fdir_input_set()
4337 fdir_filter_count = &pf->fd_tcp6_filter_cnt; in i40e_check_fdir_input_set()
4341 fdir_filter_count = &pf->fd_udp6_filter_cnt; in i40e_check_fdir_input_set()
4345 fdir_filter_count = &pf->fd_ip4_filter_cnt; in i40e_check_fdir_input_set()
4350 fdir_filter_count = &pf->fd_ip6_filter_cnt; in i40e_check_fdir_input_set()
4354 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4370 switch (fsp->flow_type & ~FLOW_EXT) { in i40e_check_fdir_input_set()
4376 tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec; in i40e_check_fdir_input_set()
4379 if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4381 else if (!tcp_ip4_spec->ip4src) in i40e_check_fdir_input_set()
4384 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4387 if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4389 else if (!tcp_ip4_spec->ip4dst) in i40e_check_fdir_input_set()
4392 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4395 if (tcp_ip4_spec->psrc == htons(0xFFFF)) in i40e_check_fdir_input_set()
4397 else if (!tcp_ip4_spec->psrc) in i40e_check_fdir_input_set()
4400 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4403 if (tcp_ip4_spec->pdst == htons(0xFFFF)) in i40e_check_fdir_input_set()
4405 else if (!tcp_ip4_spec->pdst) in i40e_check_fdir_input_set()
4408 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4411 if (tcp_ip4_spec->tos) in i40e_check_fdir_input_set()
4412 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4420 tcp_ip6_spec = &fsp->m_u.tcp_ip6_spec; in i40e_check_fdir_input_set()
4423 if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6src, in i40e_check_fdir_input_set()
4427 &tcp_ip6_spec->ip6src)) in i40e_check_fdir_input_set()
4430 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4433 if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6dst, in i40e_check_fdir_input_set()
4437 &tcp_ip6_spec->ip6dst)) in i40e_check_fdir_input_set()
4440 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4443 if (tcp_ip6_spec->psrc == htons(0xFFFF)) in i40e_check_fdir_input_set()
4445 else if (!tcp_ip6_spec->psrc) in i40e_check_fdir_input_set()
4448 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4451 if (tcp_ip6_spec->pdst == htons(0xFFFF)) in i40e_check_fdir_input_set()
4453 else if (!tcp_ip6_spec->pdst) in i40e_check_fdir_input_set()
4456 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4459 if (tcp_ip6_spec->tclass) in i40e_check_fdir_input_set()
4460 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4463 usr_ip4_spec = &fsp->m_u.usr_ip4_spec; in i40e_check_fdir_input_set()
4466 if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4468 else if (!usr_ip4_spec->ip4src) in i40e_check_fdir_input_set()
4471 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4474 if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4476 else if (!usr_ip4_spec->ip4dst) in i40e_check_fdir_input_set()
4479 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4482 if (usr_ip4_spec->l4_4_bytes) in i40e_check_fdir_input_set()
4483 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4486 if (usr_ip4_spec->tos) in i40e_check_fdir_input_set()
4487 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4490 if (usr_ip4_spec->ip_ver) in i40e_check_fdir_input_set()
4491 return -EINVAL; in i40e_check_fdir_input_set()
4494 if (usr_ip4_spec->proto) in i40e_check_fdir_input_set()
4495 return -EINVAL; in i40e_check_fdir_input_set()
4499 usr_ip6_spec = &fsp->m_u.usr_ip6_spec; in i40e_check_fdir_input_set()
4502 if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6src, in i40e_check_fdir_input_set()
4506 &usr_ip6_spec->ip6src)) in i40e_check_fdir_input_set()
4509 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4512 if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6dst, in i40e_check_fdir_input_set()
4516 &usr_ip6_spec->ip6dst)) in i40e_check_fdir_input_set()
4519 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4521 if (usr_ip6_spec->l4_4_bytes) in i40e_check_fdir_input_set()
4522 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4525 if (usr_ip6_spec->tclass) in i40e_check_fdir_input_set()
4526 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4529 if (usr_ip6_spec->l4_proto) in i40e_check_fdir_input_set()
4530 return -EINVAL; in i40e_check_fdir_input_set()
4534 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4537 if (fsp->flow_type & FLOW_EXT) { in i40e_check_fdir_input_set()
4541 if (fsp->h_ext.vlan_etype != htons(ETH_P_8021Q) && in i40e_check_fdir_input_set()
4542 fsp->h_ext.vlan_etype != 0) in i40e_check_fdir_input_set()
4543 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4544 if (fsp->m_ext.vlan_tci == htons(0xFFFF)) in i40e_check_fdir_input_set()
4558 if (userdef->flex_filter) { in i40e_check_fdir_input_set()
4562 * must be aligned on a 2-byte boundary. in i40e_check_fdir_input_set()
4564 if (userdef->flex_offset & 0x1) { in i40e_check_fdir_input_set()
4565 dev_warn(&pf->pdev->dev, in i40e_check_fdir_input_set()
4566 "Flexible data offset must be 2-byte aligned\n"); in i40e_check_fdir_input_set()
4567 return -EINVAL; in i40e_check_fdir_input_set()
4570 src_offset = userdef->flex_offset >> 1; in i40e_check_fdir_input_set()
4574 dev_warn(&pf->pdev->dev, in i40e_check_fdir_input_set()
4576 return -EINVAL; in i40e_check_fdir_input_set()
4584 flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list, in i40e_check_fdir_input_set()
4598 i40e_find_flex_offset(&pf->l3_flex_pit_list, in i40e_check_fdir_input_set()
4609 if (l3_flex_pit->pit_index != in i40e_check_fdir_input_set()
4610 flex_pit->pit_index) { in i40e_check_fdir_input_set()
4611 return -EINVAL; in i40e_check_fdir_input_set()
4630 pit_index = flex_pit->pit_index; in i40e_check_fdir_input_set()
4644 netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n", in i40e_check_fdir_input_set()
4648 netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d", in i40e_check_fdir_input_set()
4656 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { in i40e_check_fdir_input_set()
4657 netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n"); in i40e_check_fdir_input_set()
4658 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4671 …netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters… in i40e_check_fdir_input_set()
4674 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4691 err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset, in i40e_check_fdir_input_set()
4697 err = i40e_add_flex_offset(&pf->l3_flex_pit_list, in i40e_check_fdir_input_set()
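The long switch above applies one rule to every field of the rx_flow_spec mask: Flow Director matches exact values only, so each field mask must be either all ones (the field joins the input set) or all zeros (the field is ignored), and any partial mask is rejected with -EOPNOTSUPP. The per-field check boils down to something like the following (field_bit and input_set are illustrative; the driver uses its own L3/L4 mask constants):

/* Accept only all-or-nothing masks for one 32-bit field. */
static int check_field_mask(__be32 mask, u64 field_bit, u64 *input_set)
{
	if (mask == htonl(0xFFFFFFFF))
		*input_set |= field_bit;	/* match exactly on this field */
	else if (!mask)
		*input_set &= ~field_bit;	/* ignore this field entirely  */
	else
		return -EOPNOTSUPP;		/* partial masks unsupported   */
	return 0;
}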
4711 * i40e_match_fdir_filter - Return true if two filters match
4717 * check any input-set since all filters of the same flow type must use the
4724 if (a->dst_ip != b->dst_ip || in i40e_match_fdir_filter()
4725 a->src_ip != b->src_ip || in i40e_match_fdir_filter()
4726 a->dst_port != b->dst_port || in i40e_match_fdir_filter()
4727 a->src_port != b->src_port || in i40e_match_fdir_filter()
4728 a->flow_type != b->flow_type || in i40e_match_fdir_filter()
4729 a->ipl4_proto != b->ipl4_proto || in i40e_match_fdir_filter()
4730 a->vlan_tag != b->vlan_tag || in i40e_match_fdir_filter()
4731 a->vlan_etype != b->vlan_etype) in i40e_match_fdir_filter()
4738 * i40e_disallow_matching_filters - Check that new filters differ
4765 struct i40e_pf *pf = vsi->back; in i40e_disallow_matching_filters()
4771 &pf->fdir_filter_list, fdir_node) { in i40e_disallow_matching_filters()
4776 if (rule->fd_id == input->fd_id) in i40e_disallow_matching_filters()
4783 dev_warn(&pf->pdev->dev, in i40e_disallow_matching_filters()
4785 rule->fd_id); in i40e_disallow_matching_filters()
4786 return -EINVAL; in i40e_disallow_matching_filters()
4794 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
4809 int ret = -EINVAL; in i40e_add_fdir_ethtool()
4813 return -EINVAL; in i40e_add_fdir_ethtool()
4814 pf = vsi->back; in i40e_add_fdir_ethtool()
4816 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) in i40e_add_fdir_ethtool()
4817 return -EOPNOTSUPP; in i40e_add_fdir_ethtool()
4819 if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_add_fdir_ethtool()
4820 return -ENOSPC; in i40e_add_fdir_ethtool()
4822 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_add_fdir_ethtool()
4823 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_add_fdir_ethtool()
4824 return -EBUSY; in i40e_add_fdir_ethtool()
4826 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_add_fdir_ethtool()
4827 return -EBUSY; in i40e_add_fdir_ethtool()
4829 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; in i40e_add_fdir_ethtool()
4831 /* Parse the user-defined field */ in i40e_add_fdir_ethtool()
4833 return -EINVAL; in i40e_add_fdir_ethtool()
4836 if (fsp->flow_type & FLOW_MAC_EXT) in i40e_add_fdir_ethtool()
4837 return -EINVAL; in i40e_add_fdir_ethtool()
4843 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + in i40e_add_fdir_ethtool()
4844 pf->hw.func_caps.fd_filters_guaranteed)) { in i40e_add_fdir_ethtool()
4845 return -EINVAL; in i40e_add_fdir_ethtool()
4851 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { in i40e_add_fdir_ethtool()
4854 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); in i40e_add_fdir_ethtool()
4855 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); in i40e_add_fdir_ethtool()
4858 if (ring >= vsi->num_queue_pairs) in i40e_add_fdir_ethtool()
4859 return -EINVAL; in i40e_add_fdir_ethtool()
4860 dest_vsi = vsi->id; in i40e_add_fdir_ethtool()
4862 /* VFs are zero-indexed, so we subtract one here */ in i40e_add_fdir_ethtool()
4863 vf--; in i40e_add_fdir_ethtool()
4865 if (vf >= pf->num_alloc_vfs) in i40e_add_fdir_ethtool()
4866 return -EINVAL; in i40e_add_fdir_ethtool()
4867 if (ring >= pf->vf[vf].num_queue_pairs) in i40e_add_fdir_ethtool()
4868 return -EINVAL; in i40e_add_fdir_ethtool()
4869 dest_vsi = pf->vf[vf].lan_vsi_id; in i40e_add_fdir_ethtool()
4878 return -ENOMEM; in i40e_add_fdir_ethtool()
4880 input->fd_id = fsp->location; in i40e_add_fdir_ethtool()
4881 input->q_index = q_index; in i40e_add_fdir_ethtool()
4882 input->dest_vsi = dest_vsi; in i40e_add_fdir_ethtool()
4883 input->dest_ctl = dest_ctl; in i40e_add_fdir_ethtool()
4884 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; in i40e_add_fdir_ethtool()
4885 input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); in i40e_add_fdir_ethtool()
4886 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; in i40e_add_fdir_ethtool()
4887 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; in i40e_add_fdir_ethtool()
4888 input->flow_type = fsp->flow_type & ~FLOW_EXT; in i40e_add_fdir_ethtool()
4890 input->vlan_etype = fsp->h_ext.vlan_etype; in i40e_add_fdir_ethtool()
4891 if (!fsp->m_ext.vlan_etype && fsp->h_ext.vlan_tci) in i40e_add_fdir_ethtool()
4892 input->vlan_etype = cpu_to_be16(ETH_P_8021Q); in i40e_add_fdir_ethtool()
4893 if (fsp->m_ext.vlan_tci && input->vlan_etype) in i40e_add_fdir_ethtool()
4894 input->vlan_tag = fsp->h_ext.vlan_tci; in i40e_add_fdir_ethtool()
4895 if (input->flow_type == IPV6_USER_FLOW || in i40e_add_fdir_ethtool()
4896 input->flow_type == UDP_V6_FLOW || in i40e_add_fdir_ethtool()
4897 input->flow_type == TCP_V6_FLOW || in i40e_add_fdir_ethtool()
4898 input->flow_type == SCTP_V6_FLOW) { in i40e_add_fdir_ethtool()
4903 input->ipl4_proto = fsp->h_u.usr_ip6_spec.l4_proto; in i40e_add_fdir_ethtool()
4904 input->dst_port = fsp->h_u.tcp_ip6_spec.psrc; in i40e_add_fdir_ethtool()
4905 input->src_port = fsp->h_u.tcp_ip6_spec.pdst; in i40e_add_fdir_ethtool()
4906 memcpy(input->dst_ip6, fsp->h_u.ah_ip6_spec.ip6src, in i40e_add_fdir_ethtool()
4908 memcpy(input->src_ip6, fsp->h_u.ah_ip6_spec.ip6dst, in i40e_add_fdir_ethtool()
4915 input->ipl4_proto = fsp->h_u.usr_ip4_spec.proto; in i40e_add_fdir_ethtool()
4916 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; in i40e_add_fdir_ethtool()
4917 input->src_port = fsp->h_u.tcp_ip4_spec.pdst; in i40e_add_fdir_ethtool()
4918 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; in i40e_add_fdir_ethtool()
4919 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; in i40e_add_fdir_ethtool()
4923 input->flex_filter = true; in i40e_add_fdir_ethtool()
4924 input->flex_word = cpu_to_be16(userdef.flex_word); in i40e_add_fdir_ethtool()
4925 input->flex_offset = userdef.flex_offset; in i40e_add_fdir_ethtool()
4935 * to the list as this would cause a use-after-free bug. in i40e_add_fdir_ethtool()
4937 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); in i40e_add_fdir_ethtool()
4944 hlist_del(&input->fdir_node); in i40e_add_fdir_ethtool()
4945 pf->fdir_pf_active_filters--; in i40e_add_fdir_ethtool()
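The ring_cookie handling above relies on the ethtool UAPI convention of packing the destination into one 64-bit cookie: the low 32 bits carry the queue index and the byte above them carries the VF field, where 0 means "deliver to the PF itself" and any other value is the VF number plus one (hence the "vf--" seen above). A small decode sketch (decode_ring_cookie is a hypothetical helper name):

#include <linux/ethtool.h>

static void decode_ring_cookie(u64 ring_cookie, u8 *vf, u32 *queue)
{
	/* Low 32 bits: queue index relative to the target VSI. */
	*queue = (u32)ethtool_get_flow_spec_ring(ring_cookie);
	/* Bits 32-39: VF field; 0 targets the PF, otherwise VF index + 1. */
	*vf = (u8)ethtool_get_flow_spec_ring_vf(ring_cookie);
}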
4952 * i40e_set_rxnfc - command to set RX flow classification rules
4961 struct i40e_vsi *vsi = np->vsi; in i40e_set_rxnfc()
4962 struct i40e_pf *pf = vsi->back; in i40e_set_rxnfc()
4963 int ret = -EOPNOTSUPP; in i40e_set_rxnfc()
4965 switch (cmd->cmd) { in i40e_set_rxnfc()
4983 * i40e_max_channels - get Max number of combined channels supported
4989 return vsi->alloc_queue_pairs; in i40e_max_channels()
4993 * i40e_get_channels - Get the current channels enabled and max supported etc.
5006 struct i40e_vsi *vsi = np->vsi; in i40e_get_channels()
5007 struct i40e_pf *pf = vsi->back; in i40e_get_channels()
5010 ch->max_combined = i40e_max_channels(vsi); in i40e_get_channels()
5013 ch->other_count = test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0; in i40e_get_channels()
5014 ch->max_other = ch->other_count; in i40e_get_channels()
5017 ch->combined_count = vsi->num_queue_pairs; in i40e_get_channels()
5021 * i40e_set_channels - Set the new channel count.
5033 unsigned int count = ch->combined_count; in i40e_set_channels()
5034 struct i40e_vsi *vsi = np->vsi; in i40e_set_channels()
5035 struct i40e_pf *pf = vsi->back; in i40e_set_channels()
5042 if (vsi->type != I40E_VSI_MAIN) in i40e_set_channels()
5043 return -EINVAL; in i40e_set_channels()
5049 return -EINVAL; in i40e_set_channels()
5052 if (!count || ch->rx_count || ch->tx_count) in i40e_set_channels()
5053 return -EINVAL; in i40e_set_channels()
5056 if (ch->other_count != (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0)) in i40e_set_channels()
5057 return -EINVAL; in i40e_set_channels()
5061 return -EINVAL; in i40e_set_channels()
5067 &pf->fdir_filter_list, fdir_node) { in i40e_set_channels()
5068 if (rule->dest_ctl != drop && count <= rule->q_index) { in i40e_set_channels()
5069 dev_warn(&pf->pdev->dev, in i40e_set_channels()
5071 rule->fd_id, rule->q_index); in i40e_set_channels()
5072 err = -EINVAL; in i40e_set_channels()
5077 dev_err(&pf->pdev->dev, in i40e_set_channels()
5093 return -EINVAL; in i40e_set_channels()
5097 * i40e_get_rxfh_key_size - get the RSS hash key size
5108 * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
5119 * i40e_get_rxfh - get the rx flow hash indirection table
5130 struct i40e_vsi *vsi = np->vsi; in i40e_get_rxfh()
5135 rxfh->hfunc = ETH_RSS_HASH_TOP; in i40e_get_rxfh()
5137 if (!rxfh->indir) in i40e_get_rxfh()
5140 seed = rxfh->key; in i40e_get_rxfh()
5143 return -ENOMEM; in i40e_get_rxfh()
5148 rxfh->indir[i] = (u32)(lut[i]); in i40e_get_rxfh()
5157 * i40e_set_rxfh - set the rx flow hash indirection table
5162 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
5170 struct i40e_vsi *vsi = np->vsi; in i40e_set_rxfh()
5171 struct i40e_pf *pf = vsi->back; in i40e_set_rxfh()
5175 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in i40e_set_rxfh()
5176 rxfh->hfunc != ETH_RSS_HASH_TOP) in i40e_set_rxfh()
5177 return -EOPNOTSUPP; in i40e_set_rxfh()
5179 if (rxfh->key) { in i40e_set_rxfh()
5180 if (!vsi->rss_hkey_user) { in i40e_set_rxfh()
5181 vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, in i40e_set_rxfh()
5183 if (!vsi->rss_hkey_user) in i40e_set_rxfh()
5184 return -ENOMEM; in i40e_set_rxfh()
5186 memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE); in i40e_set_rxfh()
5187 seed = vsi->rss_hkey_user; in i40e_set_rxfh()
5189 if (!vsi->rss_lut_user) { in i40e_set_rxfh()
5190 vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); in i40e_set_rxfh()
5191 if (!vsi->rss_lut_user) in i40e_set_rxfh()
5192 return -ENOMEM; in i40e_set_rxfh()
5196 if (rxfh->indir) in i40e_set_rxfh()
5198 vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); in i40e_set_rxfh()
5200 i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE, in i40e_set_rxfh()
5201 vsi->rss_size); in i40e_set_rxfh()
5203 return i40e_config_rss(vsi, seed, vsi->rss_lut_user, in i40e_set_rxfh()
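When userspace supplies no indirection table, i40e_set_rxfh regenerates a default LUT before committing the RSS configuration; the usual default is simply to spread table slots round-robin across the active queues. A minimal sketch of such a fill, modeled on (but not guaranteed identical to) i40e_fill_rss_lut:

/* Slot i of the LUT points at queue i % rss_size. */
static void fill_default_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = (u8)(i % rss_size);
}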
5208 * i40e_get_priv_flags - report device private flags
5220 struct i40e_vsi *vsi = np->vsi; in i40e_get_priv_flags()
5221 struct i40e_pf *pf = vsi->back; in i40e_get_priv_flags()
5229 if (test_bit(priv_flag->bitno, pf->flags)) in i40e_get_priv_flags()
5233 if (pf->hw.pf_id != 0) in i40e_get_priv_flags()
5241 if (test_bit(priv_flag->bitno, pf->flags)) in i40e_get_priv_flags()
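i40e_get_priv_flags translates the driver's internal flag bitmap into the u32 that ethtool expects: walk the private-flags table and, for each entry whose bit is set in pf->flags, set the corresponding bit of the returned word (the second loop does the same for the global, PF-0-only flags). A compact sketch of that translation, assuming only the table layout the fragments imply (a bitno per entry):

#include <linux/bitops.h>
#include <linux/bits.h>

static u32 collect_priv_flags(const unsigned long *pf_flags,
			      const struct i40e_priv_flags *table,
			      unsigned int count)
{
	u32 ret_flags = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		if (test_bit(table[i].bitno, pf_flags))
			ret_flags |= BIT(i);

	return ret_flags;
}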
5249 * i40e_set_priv_flags - set private flags
5260 struct i40e_vsi *vsi = np->vsi; in i40e_set_priv_flags()
5261 struct i40e_pf *pf = vsi->back; in i40e_set_priv_flags()
5266 bitmap_copy(orig_flags, pf->flags, I40E_PF_FLAGS_NBITS); in i40e_set_priv_flags()
5267 bitmap_copy(new_flags, pf->flags, I40E_PF_FLAGS_NBITS); in i40e_set_priv_flags()
5276 /* If this is a read-only flag, it can't be changed */ in i40e_set_priv_flags()
5277 if (priv_flag->read_only && in i40e_set_priv_flags()
5278 test_bit(priv_flag->bitno, orig_flags) != new_val) in i40e_set_priv_flags()
5279 return -EOPNOTSUPP; in i40e_set_priv_flags()
5282 set_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5284 clear_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5287 if (pf->hw.pf_id != 0) in i40e_set_priv_flags()
5297 /* If this is a read-only flag, it can't be changed */ in i40e_set_priv_flags()
5298 if (priv_flag->read_only && in i40e_set_priv_flags()
5299 test_bit(priv_flag->bitno, orig_flags) != new_val) in i40e_set_priv_flags()
5300 return -EOPNOTSUPP; in i40e_set_priv_flags()
5303 set_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5305 clear_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5309 bitmap_xor(changed_flags, pf->flags, orig_flags, I40E_PF_FLAGS_NBITS); in i40e_set_priv_flags()
5325 !test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) in i40e_set_priv_flags()
5326 return -EOPNOTSUPP; in i40e_set_priv_flags()
5330 * - on XL710 if NPAR is enabled or FW API version < 1.7 in i40e_set_priv_flags()
5331 * - on X722 with FW API version < 1.6 in i40e_set_priv_flags()
5337 !test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps)) { in i40e_set_priv_flags()
5338 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5340 return -EOPNOTSUPP; in i40e_set_priv_flags()
5344 pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && in i40e_set_priv_flags()
5345 pf->hw.device_id != I40E_DEV_ID_25G_B) { in i40e_set_priv_flags()
5346 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5348 return -EOPNOTSUPP; in i40e_set_priv_flags()
5352 pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && in i40e_set_priv_flags()
5353 pf->hw.device_id != I40E_DEV_ID_25G_B && in i40e_set_priv_flags()
5354 pf->hw.device_id != I40E_DEV_ID_KX_X722) { in i40e_set_priv_flags()
5355 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5357 return -EOPNOTSUPP; in i40e_set_priv_flags()
5368 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_set_priv_flags()
5369 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_set_priv_flags()
5379 ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, in i40e_set_priv_flags()
5381 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_set_priv_flags()
5382 dev_info(&pf->pdev->dev, in i40e_set_priv_flags()
5385 i40e_aq_str(&pf->hw, in i40e_set_priv_flags()
5386 pf->hw.aq.asq_last_status)); in i40e_set_priv_flags()
5406 dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); in i40e_set_priv_flags()
5411 dev_err(&pf->pdev->dev, in i40e_set_priv_flags()
5412 …"Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n"… in i40e_set_priv_flags()
5413 return -EOPNOTSUPP; in i40e_set_priv_flags()
5417 pf->num_alloc_vfs) { in i40e_set_priv_flags()
5418 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5419 "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n"); in i40e_set_priv_flags()
5420 return -EOPNOTSUPP; in i40e_set_priv_flags()
5425 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5427 return -EOPNOTSUPP; in i40e_set_priv_flags()
5433 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5434 "Turning on link-down-on-close flag may affect other partitions\n"); in i40e_set_priv_flags()
5441 i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL); in i40e_set_priv_flags()
5442 i40e_aq_stop_lldp(&pf->hw, true, false, NULL); in i40e_set_priv_flags()
5444 status = i40e_aq_start_lldp(&pf->hw, false, NULL); in i40e_set_priv_flags()
5446 adq_err = pf->hw.aq.asq_last_status; in i40e_set_priv_flags()
5449 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5454 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5456 return -EINVAL; in i40e_set_priv_flags()
5458 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5460 return -EBUSY; in i40e_set_priv_flags()
5462 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5465 i40e_aq_str(&pf->hw, in i40e_set_priv_flags()
5467 return -EINVAL; in i40e_set_priv_flags()
5478 bitmap_copy(pf->flags, new_flags, I40E_PF_FLAGS_NBITS); in i40e_set_priv_flags()
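Note the transaction-like shape of i40e_set_priv_flags: the live flags are copied into a scratch bitmap, every requested toggle is validated against that copy (read-only flags, mutually exclusive combinations, hardware capabilities), and only after all checks pass is the scratch bitmap copied back over pf->flags; bitmap_xor against the original identifies which bits actually changed. A compressed sketch of that pattern (names and the 64-bit bound are illustrative):

#include <linux/bitmap.h>
#include <linux/bits.h>

static int apply_flags_transactionally(unsigned long *live, u32 requested,
				       const struct i40e_priv_flags *table,
				       unsigned int count, unsigned int nbits)
{
	DECLARE_BITMAP(orig, 64);	/* 64 is an assumed upper bound */
	DECLARE_BITMAP(next, 64);
	DECLARE_BITMAP(changed, 64);
	unsigned int i;

	bitmap_copy(orig, live, nbits);
	bitmap_copy(next, live, nbits);

	for (i = 0; i < count; i++) {
		bool want = requested & BIT(i);

		/* Read-only flags may not be toggled at all. */
		if (table[i].read_only &&
		    test_bit(table[i].bitno, orig) != want)
			return -EOPNOTSUPP;
		if (want)
			set_bit(table[i].bitno, next);
		else
			clear_bit(table[i].bitno, next);
	}

	bitmap_xor(changed, next, orig, nbits);
	/* ... validate the 'changed' combinations here before committing ... */
	bitmap_copy(live, next, nbits);
	return 0;
}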
5490 * i40e_get_module_info - get (Q)SFP+ module type info
5498 struct i40e_vsi *vsi = np->vsi; in i40e_get_module_info()
5499 struct i40e_pf *pf = vsi->back; in i40e_get_module_info()
5500 struct i40e_hw *hw = &pf->hw; in i40e_get_module_info()
5508 if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { in i40e_get_module_info()
5509 …netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n"); in i40e_get_module_info()
5510 return -EINVAL; in i40e_get_module_info()
5515 return -EIO; in i40e_get_module_info()
5517 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { in i40e_get_module_info()
5518 netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n"); in i40e_get_module_info()
5519 return -EINVAL; in i40e_get_module_info()
5522 type = hw->phy.link_info.module_type[0]; in i40e_get_module_info()
5532 return -EIO; in i40e_get_module_info()
5540 return -EIO; in i40e_get_module_info()
5546 netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n"); in i40e_get_module_info()
5547 modinfo->type = ETH_MODULE_SFF_8079; in i40e_get_module_info()
5548 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; in i40e_get_module_info()
5550 /* Module is not SFF-8472 compliant */ in i40e_get_module_info()
5551 modinfo->type = ETH_MODULE_SFF_8079; in i40e_get_module_info()
5552 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; in i40e_get_module_info()
5554 /* Module is SFF-8472 compliant but doesn't implement in i40e_get_module_info()
5557 modinfo->type = ETH_MODULE_SFF_8079; in i40e_get_module_info()
5558 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; in i40e_get_module_info()
5560 modinfo->type = ETH_MODULE_SFF_8472; in i40e_get_module_info()
5561 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; in i40e_get_module_info()
5572 return -EIO; in i40e_get_module_info()
5575 /* Module is SFF-8636 compliant */ in i40e_get_module_info()
5576 modinfo->type = ETH_MODULE_SFF_8636; in i40e_get_module_info()
5577 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; in i40e_get_module_info()
5579 modinfo->type = ETH_MODULE_SFF_8436; in i40e_get_module_info()
5580 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; in i40e_get_module_info()
5584 modinfo->type = ETH_MODULE_SFF_8636; in i40e_get_module_info()
5585 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; in i40e_get_module_info()
5588 netdev_dbg(vsi->netdev, "SFP module type unrecognized or no SFP connector used.\n"); in i40e_get_module_info()
5589 return -EOPNOTSUPP; in i40e_get_module_info()
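The SFP branch above is the standard SFF-8472 capability probe: read the identification page at I2C address 0xA0, inspect the diagnostic-monitoring byte for the "address change required" quirk and the 8472-compliance byte, and report either a 256-byte (SFF-8079) or 512-byte (SFF-8472) EEPROM to ethtool accordingly. A hedged sketch of that decision; the byte positions mentioned in the comments (roughly bytes 92 and 94 of page 0xA0) are from my reading of SFF-8472 and should be checked against the spec rather than taken as the driver's constants:

#include <linux/ethtool.h>

/* diag_mon     = diagnostic monitoring type byte of page 0xA0 (assumed byte 92)
 * sff8472_comp = SFF-8472 compliance code of page 0xA0 (assumed byte 94)
 */
static void sfp_fill_modinfo(u8 diag_mon, u8 sff8472_comp,
			     struct ethtool_modinfo *modinfo)
{
	bool addr_change = diag_mon & BIT(2);	/* "address change required" */

	if (!sff8472_comp || addr_change) {
		/* Only the 256-byte 0xA0 page can be read reliably. */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* 0xA0 plus the 0xA2 diagnostics page. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}
}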
5595 * i40e_get_module_eeprom - fill buffer with (Q)SFP+ module memory contents
5605 struct i40e_vsi *vsi = np->vsi; in i40e_get_module_eeprom()
5606 struct i40e_pf *pf = vsi->back; in i40e_get_module_eeprom()
5607 struct i40e_hw *hw = &pf->hw; in i40e_get_module_eeprom()
5613 if (!ee || !ee->len || !data) in i40e_get_module_eeprom()
5614 return -EINVAL; in i40e_get_module_eeprom()
5616 if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) in i40e_get_module_eeprom()
5619 for (i = 0; i < ee->len; i++) { in i40e_get_module_eeprom()
5620 u32 offset = i + ee->offset; in i40e_get_module_eeprom()
5626 offset -= ETH_MODULE_SFF_8079_LEN; in i40e_get_module_eeprom()
5632 offset -= ETH_MODULE_SFF_8436_LEN / 2; in i40e_get_module_eeprom()
5641 return -EIO; in i40e_get_module_eeprom()
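For SFP modules, the read loop above turns the flat ethtool offset into an (I2C device address, in-page offset) pair: the first ETH_MODULE_SFF_8079_LEN bytes come from address 0xA0 and everything beyond that from the 0xA2 diagnostics page. A small sketch of that mapping (the address constants are illustrative, not the driver's defines):

#include <linux/ethtool.h>

#define SFP_I2C_ADDR_A0	0xA0	/* identification page */
#define SFP_I2C_ADDR_A2	0xA2	/* diagnostics page    */

static void sfp_map_eeprom_offset(u32 flat, u8 *i2c_addr, u32 *page_off)
{
	if (flat < ETH_MODULE_SFF_8079_LEN) {
		*i2c_addr = SFP_I2C_ADDR_A0;
		*page_off = flat;
	} else {
		*i2c_addr = SFP_I2C_ADDR_A2;
		*page_off = flat - ETH_MODULE_SFF_8079_LEN;
	}
}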
5651 struct i40e_vsi *vsi = np->vsi; in i40e_get_eee()
5652 struct i40e_pf *pf = vsi->back; in i40e_get_eee()
5653 struct i40e_hw *hw = &pf->hw; in i40e_get_eee()
5659 return -EAGAIN; in i40e_get_eee()
5665 return -EOPNOTSUPP; in i40e_get_eee()
5667 edata->supported = SUPPORTED_Autoneg; in i40e_get_eee()
5668 edata->lp_advertised = edata->supported; in i40e_get_eee()
5673 return -EAGAIN; in i40e_get_eee()
5675 edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U; in i40e_get_eee()
5676 edata->eee_enabled = !!edata->advertised; in i40e_get_eee()
5677 edata->tx_lpi_enabled = pf->stats.tx_lpi_status; in i40e_get_eee()
5679 edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status; in i40e_get_eee()
5688 struct i40e_vsi *vsi = np->vsi; in i40e_is_eee_param_supported()
5689 struct i40e_pf *pf = vsi->back; in i40e_is_eee_param_supported()
5694 {edata->advertised & ~SUPPORTED_Autoneg, "advertise"}, in i40e_is_eee_param_supported()
5695 {edata->tx_lpi_timer, "tx-timer"}, in i40e_is_eee_param_supported()
5696 {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"} in i40e_is_eee_param_supported()
5705 return -EOPNOTSUPP; in i40e_is_eee_param_supported()
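i40e_is_eee_param_supported is table-driven: each row pairs an expression that is non-zero when the request cannot be honored with a short name for the offending parameter, and the first such row triggers a warning and -EOPNOTSUPP. The same pattern in isolation (param_check and reject_unsupported are illustrative names):

#include <linux/device.h>

struct param_check {
	u32 value;		/* non-zero means the request is unsupported */
	const char *name;	/* parameter name used in the warning        */
};

static int reject_unsupported(struct device *dev,
			      const struct param_check *checks,
			      unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (!checks[i].value)
			continue;
		dev_warn(dev, "EEE setting of \"%s\" is not supported\n",
			 checks[i].name);
		return -EOPNOTSUPP;
	}
	return 0;
}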
5717 struct i40e_vsi *vsi = np->vsi; in i40e_set_eee()
5718 struct i40e_pf *pf = vsi->back; in i40e_set_eee()
5719 struct i40e_hw *hw = &pf->hw; in i40e_set_eee()
5725 return -EOPNOTSUPP; in i40e_set_eee()
5731 return -EAGAIN; in i40e_set_eee()
5737 return -EOPNOTSUPP; in i40e_set_eee()
5746 return -EAGAIN; in i40e_set_eee()
5760 if (edata->eee_enabled) { in i40e_set_eee()
5771 return -EAGAIN; in i40e_set_eee()
5839 struct i40e_pf *pf = np->vsi->back; in i40e_set_ethtool_ops()
5841 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_set_ethtool_ops()
5842 netdev->ethtool_ops = &i40e_ethtool_ops; in i40e_set_ethtool_ops()
5844 netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops; in i40e_set_ethtool_ops()