Lines Matching full:pf
35 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
38 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
39 static int i40e_setup_misc_vector(struct i40e_pf *pf);
40 static void i40e_determine_queue_usage(struct i40e_pf *pf);
41 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
42 static void i40e_prep_for_reset(struct i40e_pf *pf);
43 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
45 static int i40e_reset(struct i40e_pf *pf);
46 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
47 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
48 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
49 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
50 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
51 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
53 static int i40e_get_capabilities(struct i40e_pf *pf,
55 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
137 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_hw_to_dev() local
139 return &pf->pdev->dev; in i40e_hw_to_dev()
152 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_allocate_dma_mem() local
155 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, in i40e_allocate_dma_mem()
170 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_free_dma_mem() local
172 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); in i40e_free_dma_mem()
215 * @pf: board private structure
222 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, in i40e_get_lump() argument
229 dev_info(&pf->pdev->dev, in i40e_get_lump()
238 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { in i40e_get_lump()
240 dev_err(&pf->pdev->dev, in i40e_get_lump()
308 * @pf: the pf structure to search for the vsi
311 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) in i40e_find_vsi_from_id() argument
315 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_find_vsi_from_id()
316 if (pf->vsi[i] && (pf->vsi[i]->id == id)) in i40e_find_vsi_from_id()
317 return pf->vsi[i]; in i40e_find_vsi_from_id()
324 * @pf: board private structure
328 void i40e_service_event_schedule(struct i40e_pf *pf) in i40e_service_event_schedule() argument
330 if ((!test_bit(__I40E_DOWN, pf->state) && in i40e_service_event_schedule()
331 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || in i40e_service_event_schedule()
332 test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_service_event_schedule()
333 queue_work(i40e_wq, &pf->service_task); in i40e_service_event_schedule()
349 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout() local
354 pf->tx_timeout_count++; in i40e_tx_timeout()
367 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) in i40e_tx_timeout()
368 pf->tx_timeout_recovery_level = 1; /* reset after some time */ in i40e_tx_timeout()
370 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) in i40e_tx_timeout()
374 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) in i40e_tx_timeout()
380 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_tx_timeout()
381 val = rd32(&pf->hw, in i40e_tx_timeout()
385 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); in i40e_tx_timeout()
393 pf->tx_timeout_last_recovery = jiffies; in i40e_tx_timeout()
395 pf->tx_timeout_recovery_level, txqueue); in i40e_tx_timeout()
397 switch (pf->tx_timeout_recovery_level) { in i40e_tx_timeout()
399 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
402 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
405 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
409 set_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_tx_timeout()
414 i40e_service_event_schedule(pf); in i40e_tx_timeout()
415 pf->tx_timeout_recovery_level++; in i40e_tx_timeout()
550 * i40e_pf_reset_stats - Reset all of the stats for the given PF
551 * @pf: the PF to be reset
553 void i40e_pf_reset_stats(struct i40e_pf *pf) in i40e_pf_reset_stats() argument
557 memset(&pf->stats, 0, sizeof(pf->stats)); in i40e_pf_reset_stats()
558 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); in i40e_pf_reset_stats()
559 pf->stat_offsets_loaded = false; in i40e_pf_reset_stats()
562 if (pf->veb[i]) { in i40e_pf_reset_stats()
563 memset(&pf->veb[i]->stats, 0, in i40e_pf_reset_stats()
564 sizeof(pf->veb[i]->stats)); in i40e_pf_reset_stats()
565 memset(&pf->veb[i]->stats_offsets, 0, in i40e_pf_reset_stats()
566 sizeof(pf->veb[i]->stats_offsets)); in i40e_pf_reset_stats()
567 memset(&pf->veb[i]->tc_stats, 0, in i40e_pf_reset_stats()
568 sizeof(pf->veb[i]->tc_stats)); in i40e_pf_reset_stats()
569 memset(&pf->veb[i]->tc_stats_offsets, 0, in i40e_pf_reset_stats()
570 sizeof(pf->veb[i]->tc_stats_offsets)); in i40e_pf_reset_stats()
571 pf->veb[i]->stat_offsets_loaded = false; in i40e_pf_reset_stats()
574 pf->hw_csum_rx_error = 0; in i40e_pf_reset_stats()
720 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats() local
721 struct i40e_hw *hw = &pf->hw; in i40e_update_eth_stats()
782 struct i40e_pf *pf = veb->pf; in i40e_update_veb_stats() local
783 struct i40e_hw *hw = &pf->hw; in i40e_update_veb_stats()
868 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats() local
885 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_update_vsi_stats()
995 /* pull in a couple PF stats if this is the main vsi */ in i40e_update_vsi_stats()
996 if (vsi == pf->vsi[pf->lan_vsi]) { in i40e_update_vsi_stats()
997 ns->rx_crc_errors = pf->stats.crc_errors; in i40e_update_vsi_stats()
998 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; in i40e_update_vsi_stats()
999 ns->rx_length_errors = pf->stats.rx_length_errors; in i40e_update_vsi_stats()
1004 * i40e_update_pf_stats - Update the PF statistics counters.
1005 * @pf: the PF to be updated
1007 static void i40e_update_pf_stats(struct i40e_pf *pf) in i40e_update_pf_stats() argument
1009 struct i40e_hw_port_stats *osd = &pf->stats_offsets; in i40e_update_pf_stats()
1010 struct i40e_hw_port_stats *nsd = &pf->stats; in i40e_update_pf_stats()
1011 struct i40e_hw *hw = &pf->hw; in i40e_update_pf_stats()
1017 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1021 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1024 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1029 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1034 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1039 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1044 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1049 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1054 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1059 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1064 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1068 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1072 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1076 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1081 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1086 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1089 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1092 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1095 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1100 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1104 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1108 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1112 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1117 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1124 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1128 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1132 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1136 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1140 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1144 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1148 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1153 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1157 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1161 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1165 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1169 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1173 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1177 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1181 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1184 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1187 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1190 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1210 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1213 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1216 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && in i40e_update_pf_stats()
1217 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1222 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_update_pf_stats()
1223 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1228 pf->stat_offsets_loaded = true; in i40e_update_pf_stats()
1239 struct i40e_pf *pf = vsi->back; in i40e_update_stats() local
1241 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_update_stats()
1242 i40e_update_pf_stats(pf); in i40e_update_stats()
1482 struct i40e_pf *pf = vsi->back; in i40e_get_vf_new_vlan() local
1492 !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags)); in i40e_get_vf_new_vlan()
1571 * @vsi: the PF Main VSI - inappropriate for any other VSI
1580 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter() local
1582 /* Only appropriate for the PF main VSI */ in i40e_rm_default_mac_filter()
1591 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1599 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1798 struct i40e_pf *pf = vsi->back; in i40e_set_mac() local
1799 struct i40e_hw *hw = &pf->hw; in i40e_set_mac()
1805 if (test_bit(__I40E_DOWN, pf->state) || in i40e_set_mac()
1806 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_set_mac()
1841 i40e_service_event_schedule(pf); in i40e_set_mac()
1855 struct i40e_pf *pf = vsi->back; in i40e_config_rss_aq() local
1856 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_aq()
1864 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1876 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1892 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_rss() local
1897 if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) in i40e_vsi_config_rss()
1900 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_vsi_config_rss()
1914 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_vsi_config_rss()
2025 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_queue_map() local
2052 else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_setup_queue_map()
2053 vsi->num_queue_pairs = pf->num_lan_msix; in i40e_vsi_setup_queue_map()
2072 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); in i40e_vsi_setup_queue_map()
2077 i40e_pf_get_max_q_per_tc(pf)); in i40e_vsi_setup_queue_map()
2084 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_setup_queue_map()
2085 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); in i40e_vsi_setup_queue_map()
2097 pf->flags) && in i40e_vsi_setup_queue_map()
2099 pf->flags)) || in i40e_vsi_setup_queue_map()
2101 qcount = min_t(int, pf->alloc_rss_size, in i40e_vsi_setup_queue_map()
2470 * @pf: board private structure
2473 * There are different ways of setting promiscuous mode on a PF depending on
2477 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) in i40e_set_promiscuous() argument
2479 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_set_promiscuous()
2480 struct i40e_hw *hw = &pf->hw; in i40e_set_promiscuous()
2484 pf->lan_veb != I40E_NO_VEB && in i40e_set_promiscuous()
2485 !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { in i40e_set_promiscuous()
2500 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2512 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2522 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2530 pf->cur_promisc = promisc; in i40e_set_promiscuous()
2552 char vsi_name[16] = "PF"; in i40e_sync_vsi_filters()
2556 struct i40e_pf *pf; in i40e_sync_vsi_filters() local
2571 pf = vsi->back; in i40e_sync_vsi_filters()
2628 else if (pf->vf) in i40e_sync_vsi_filters()
2631 vlan_filters, pf->vf[vsi->vf_id].trusted); in i40e_sync_vsi_filters()
2793 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2801 if (vsi->type == I40E_VSI_SRIOV && pf->vf && in i40e_sync_vsi_filters()
2802 !pf->vf[vsi->vf_id].trusted) { in i40e_sync_vsi_filters()
2827 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2833 dev_info(&pf->pdev->dev, "%s allmulti mode.\n", in i40e_sync_vsi_filters()
2843 aq_ret = i40e_set_promiscuous(pf, cur_promisc); in i40e_sync_vsi_filters()
2847 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2878 * @pf: board private structure
2880 static void i40e_sync_filters_subtask(struct i40e_pf *pf) in i40e_sync_filters_subtask() argument
2884 if (!pf) in i40e_sync_filters_subtask()
2886 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) in i40e_sync_filters_subtask()
2888 if (test_bit(__I40E_VF_DISABLE, pf->state)) { in i40e_sync_filters_subtask()
2889 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_sync_filters_subtask()
2893 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_sync_filters_subtask()
2894 if (pf->vsi[v] && in i40e_sync_filters_subtask()
2895 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && in i40e_sync_filters_subtask()
2896 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { in i40e_sync_filters_subtask()
2897 int ret = i40e_sync_vsi_filters(pf->vsi[v]); in i40e_sync_filters_subtask()
2902 pf->state); in i40e_sync_filters_subtask()
2952 struct i40e_pf *pf = vsi->back; in i40e_change_mtu() local
2967 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_change_mtu()
2968 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_change_mtu()
2981 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl() local
2985 return i40e_ptp_get_ts_config(pf, ifr); in i40e_ioctl()
2987 return i40e_ptp_set_ts_config(pf, ifr); in i40e_ioctl()
3823 * @pf: Pointer to the targeted PF
3827 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf) in i40e_reset_fdir_filter_cnt() argument
3829 pf->fd_tcp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3830 pf->fd_udp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3831 pf->fd_sctp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3832 pf->fd_ip4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3833 pf->fd_tcp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3834 pf->fd_udp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3835 pf->fd_sctp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3836 pf->fd_ip6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3849 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore() local
3852 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) in i40e_fdir_filter_restore()
3856 i40e_reset_fdir_filter_cnt(pf); in i40e_fdir_filter_restore()
3859 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_restore()
3889 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix() local
3890 struct i40e_hw *hw = &pf->hw; in i40e_vsi_configure_msix()
3970 * @pf: pointer to private device data structure
3972 static void i40e_enable_misc_int_causes(struct i40e_pf *pf) in i40e_enable_misc_int_causes() argument
3974 struct i40e_hw *hw = &pf->hw; in i40e_enable_misc_int_causes()
3990 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) in i40e_enable_misc_int_causes()
3993 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_enable_misc_int_causes()
4014 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy() local
4015 struct i40e_hw *hw = &pf->hw; in i40e_configure_msi_and_legacy()
4027 i40e_enable_misc_int_causes(pf); in i40e_configure_msi_and_legacy()
4051 * @pf: board private structure
4053 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) in i40e_irq_dynamic_disable_icr0() argument
4055 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_disable_icr0()
4064 * @pf: board private structure
4066 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) in i40e_irq_dynamic_enable_icr0() argument
4068 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_enable_icr0()
4133 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix() local
4144 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4166 dev_info(&pf->pdev->dev, in i40e_vsi_request_irq_msix()
4192 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4206 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq() local
4207 struct i40e_hw *hw = &pf->hw; in i40e_vsi_disable_irq()
4229 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_vsi_disable_irq()
4236 synchronize_irq(pf->msix_entries[i + base].vector); in i40e_vsi_disable_irq()
4242 synchronize_irq(pf->pdev->irq); in i40e_vsi_disable_irq()
4252 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq() local
4255 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_vsi_enable_irq()
4259 i40e_irq_dynamic_enable_icr0(pf); in i40e_vsi_enable_irq()
4262 i40e_flush(&pf->hw); in i40e_vsi_enable_irq()
4268 * @pf: board private structure
4270 static void i40e_free_misc_vector(struct i40e_pf *pf) in i40e_free_misc_vector() argument
4273 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); in i40e_free_misc_vector()
4274 i40e_flush(&pf->hw); in i40e_free_misc_vector()
4276 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { in i40e_free_misc_vector()
4277 free_irq(pf->msix_entries[0].vector, pf); in i40e_free_misc_vector()
4278 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_free_misc_vector()
4293 struct i40e_pf *pf = (struct i40e_pf *)data; in i40e_intr() local
4294 struct i40e_hw *hw = &pf->hw; in i40e_intr()
4309 pf->sw_int_count++; in i40e_intr()
4311 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && in i40e_intr()
4314 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); in i40e_intr()
4315 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_intr()
4320 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_intr()
4329 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_intr()
4335 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_intr()
4336 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); in i40e_intr()
4341 set_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_intr()
4346 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { in i40e_intr()
4353 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); in i40e_intr()
4358 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_intr()
4359 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4364 pf->corer_count++; in i40e_intr()
4366 pf->globr_count++; in i40e_intr()
4368 pf->empr_count++; in i40e_intr()
4369 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4375 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); in i40e_intr()
4376 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", in i40e_intr()
4385 schedule_work(&pf->ptp_extts0_work); in i40e_intr()
4388 i40e_ptp_tx_hwtstamp(pf); in i40e_intr()
4399 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", in i40e_intr()
4404 dev_info(&pf->pdev->dev, "device will be reset\n"); in i40e_intr()
4405 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_intr()
4406 i40e_service_event_schedule(pf); in i40e_intr()
4415 if (!test_bit(__I40E_DOWN, pf->state) || in i40e_intr()
4416 test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_intr()
4417 i40e_service_event_schedule(pf); in i40e_intr()
4418 i40e_irq_dynamic_enable_icr0(pf); in i40e_intr()
4613 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq() local
4616 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_request_irq()
4618 else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) in i40e_vsi_request_irq()
4619 err = request_irq(pf->pdev->irq, i40e_intr, 0, in i40e_vsi_request_irq()
4620 pf->int_name, pf); in i40e_vsi_request_irq()
4622 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, in i40e_vsi_request_irq()
4623 pf->int_name, pf); in i40e_vsi_request_irq()
4626 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); in i40e_vsi_request_irq()
4643 struct i40e_pf *pf = vsi->back; in i40e_netpoll() local
4650 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_netpoll()
4654 i40e_intr(pf->pdev->irq, netdev); in i40e_netpoll()
4662 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4663 * @pf: the PF being configured
4664 * @pf_q: the PF queue
4667 * This routine will wait for the given Tx queue of the PF to reach the
4672 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) in i40e_pf_txq_wait() argument
4678 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); in i40e_pf_txq_wait()
4692 * @pf: the PF structure
4693 * @pf_q: the PF queue to configure
4700 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_tx_q() argument
4702 struct i40e_hw *hw = &pf->hw; in i40e_control_tx_q()
4707 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); in i40e_control_tx_q()
4737 * @pf: the PF structure
4738 * @pf_q: the PF queue to configure
4742 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, in i40e_control_wait_tx_q() argument
4747 i40e_control_tx_q(pf, pf_q, enable); in i40e_control_wait_tx_q()
4750 ret = i40e_pf_txq_wait(pf, pf_q, enable); in i40e_control_wait_tx_q()
4752 dev_info(&pf->pdev->dev, in i40e_control_wait_tx_q()
4767 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_tx() local
4772 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4781 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4791 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4792 * @pf: the PF being configured
4793 * @pf_q: the PF queue
4796 * This routine will wait for the given Rx queue of the PF to reach the
4801 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) in i40e_pf_rxq_wait() argument
4807 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); in i40e_pf_rxq_wait()
4821 * @pf: the PF structure
4822 * @pf_q: the PF queue to configure
4829 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_rx_q() argument
4831 struct i40e_hw *hw = &pf->hw; in i40e_control_rx_q()
4858 * @pf: the PF structure
4866 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_wait_rx_q() argument
4870 i40e_control_rx_q(pf, pf_q, enable); in i40e_control_wait_rx_q()
4873 ret = i40e_pf_rxq_wait(pf, pf_q, enable); in i40e_control_wait_rx_q()
4886 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_rx() local
4891 ret = i40e_control_wait_rx_q(pf, pf_q, true); in i40e_vsi_enable_rx()
4893 dev_info(&pf->pdev->dev, in i40e_vsi_enable_rx()
4928 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings() local
4938 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); in i40e_vsi_stop_rings()
4942 i40e_control_rx_q(pf, pf_q, false); in i40e_vsi_stop_rings()
4946 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); in i40e_vsi_stop_rings()
4964 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait() local
4969 i40e_control_tx_q(pf, pf_q, false); in i40e_vsi_stop_rings_no_wait()
4970 i40e_control_rx_q(pf, pf_q, false); in i40e_vsi_stop_rings_no_wait()
4980 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq() local
4981 struct i40e_hw *hw = &pf->hw; in i40e_vsi_free_irq()
4986 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_vsi_free_irq()
4999 irq_num = pf->msix_entries[vector].vector; in i40e_vsi_free_irq()
5059 free_irq(pf->pdev->irq, pf); in i40e_vsi_free_irq()
5142 * @pf: board private structure
5144 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) in i40e_reset_interrupt_capability() argument
5147 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_reset_interrupt_capability()
5148 pci_disable_msix(pf->pdev); in i40e_reset_interrupt_capability()
5149 kfree(pf->msix_entries); in i40e_reset_interrupt_capability()
5150 pf->msix_entries = NULL; in i40e_reset_interrupt_capability()
5151 kfree(pf->irq_pile); in i40e_reset_interrupt_capability()
5152 pf->irq_pile = NULL; in i40e_reset_interrupt_capability()
5153 } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { in i40e_reset_interrupt_capability()
5154 pci_disable_msi(pf->pdev); in i40e_reset_interrupt_capability()
5156 clear_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_reset_interrupt_capability()
5157 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_reset_interrupt_capability()
5162 * @pf: board private structure
5167 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) in i40e_clear_interrupt_scheme() argument
5171 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) in i40e_clear_interrupt_scheme()
5172 i40e_free_misc_vector(pf); in i40e_clear_interrupt_scheme()
5174 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, in i40e_clear_interrupt_scheme()
5177 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); in i40e_clear_interrupt_scheme()
5178 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_clear_interrupt_scheme()
5179 if (pf->vsi[i]) in i40e_clear_interrupt_scheme()
5180 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_clear_interrupt_scheme()
5181 i40e_reset_interrupt_capability(pf); in i40e_clear_interrupt_scheme()
5228 struct i40e_pf *pf = vsi->back; in i40e_vsi_close() local
5235 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_vsi_close()
5236 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_vsi_close()
5237 set_bit(__I40E_CLIENT_RESET, pf->state); in i40e_vsi_close()
5272 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5273 * @pf: the PF
5275 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) in i40e_pf_quiesce_all_vsi() argument
5279 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_quiesce_all_vsi()
5280 if (pf->vsi[v]) in i40e_pf_quiesce_all_vsi()
5281 i40e_quiesce_vsi(pf->vsi[v]); in i40e_pf_quiesce_all_vsi()
5286 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5287 * @pf: the PF
5289 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) in i40e_pf_unquiesce_all_vsi() argument
5293 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_unquiesce_all_vsi()
5294 if (pf->vsi[v]) in i40e_pf_unquiesce_all_vsi()
5295 i40e_unquiesce_vsi(pf->vsi[v]); in i40e_pf_unquiesce_all_vsi()
5307 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled() local
5313 ret = i40e_pf_txq_wait(pf, pf_q, false); in i40e_vsi_wait_queues_disabled()
5315 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5325 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
5328 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5335 ret = i40e_pf_rxq_wait(pf, pf_q, false); in i40e_vsi_wait_queues_disabled()
5337 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5349 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5350 * @pf: the PF
5353 * VSIs that are managed by this PF.
5355 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) in i40e_pf_wait_queues_disabled() argument
5359 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_wait_queues_disabled()
5360 if (pf->vsi[v]) { in i40e_pf_wait_queues_disabled()
5361 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); in i40e_pf_wait_queues_disabled()
5374 * @pf: pointer to PF
5376 * Get TC map for ISCSI PF type that will include iSCSI TC
5379 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) in i40e_get_iscsi_tc_map() argument
5382 struct i40e_hw *hw = &pf->hw; in i40e_get_iscsi_tc_map()
5464 * @pf: PF being queried
5469 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) in i40e_mqprio_get_enabled_tc() argument
5471 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_mqprio_get_enabled_tc()
5481 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5482 * @pf: PF being queried
5484 * Return number of traffic classes enabled for the given PF
5486 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) in i40e_pf_get_num_tc() argument
5488 struct i40e_hw *hw = &pf->hw; in i40e_pf_get_num_tc()
5493 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_pf_get_num_tc()
5494 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5497 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) in i40e_pf_get_num_tc()
5501 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) in i40e_pf_get_num_tc()
5504 /* MFP mode return count of enabled TCs for this PF */ in i40e_pf_get_num_tc()
5505 if (pf->hw.func_caps.iscsi) in i40e_pf_get_num_tc()
5506 enabled_tc = i40e_get_iscsi_tc_map(pf); in i40e_pf_get_num_tc()
5519 * @pf: PF being queried
5521 * Return a bitmap for enabled traffic classes for this PF.
5523 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) in i40e_pf_get_tc_map() argument
5525 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_pf_get_tc_map()
5526 return i40e_mqprio_get_enabled_tc(pf); in i40e_pf_get_tc_map()
5528 /* If neither MQPRIO nor DCB is enabled for this PF then just return in i40e_pf_get_tc_map()
5531 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) in i40e_pf_get_tc_map()
5534 /* SFP mode we want PF to be enabled for all TCs */ in i40e_pf_get_tc_map()
5535 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) in i40e_pf_get_tc_map()
5536 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); in i40e_pf_get_tc_map()
5538 /* MFP enabled and iSCSI PF type */ in i40e_pf_get_tc_map()
5539 if (pf->hw.func_caps.iscsi) in i40e_pf_get_tc_map()
5540 return i40e_get_iscsi_tc_map(pf); in i40e_pf_get_tc_map()
5555 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info() local
5556 struct i40e_hw *hw = &pf->hw; in i40e_vsi_get_bw_info()
5564 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5565 "couldn't get PF vsi bw config, err %pe aq_err %s\n", in i40e_vsi_get_bw_info()
5567 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5575 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5576 "couldn't get PF vsi ets bw config, err %pe aq_err %s\n", in i40e_vsi_get_bw_info()
5578 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5583 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5617 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc() local
5622 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_vsi_configure_bw_alloc()
5624 if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_vsi_configure_bw_alloc()
5627 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5637 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5639 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5641 pf->hw.aq.asq_last_status); in i40e_vsi_configure_bw_alloc()
5660 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc() local
5661 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_netdev_tc()
5694 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_vsi_config_netdev_tc()
5734 struct i40e_pf *pf; in i40e_update_adq_vsi_queues() local
5740 pf = vsi->back; in i40e_update_adq_vsi_queues()
5741 hw = &pf->hw; in i40e_update_adq_vsi_queues()
5754 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_update_adq_vsi_queues()
5758 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); in i40e_update_adq_vsi_queues()
5766 dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n", in i40e_update_adq_vsi_queues()
5794 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc() local
5795 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_tc()
5815 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5821 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5834 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5842 dev_err(&pf->pdev->dev, in i40e_vsi_config_tc()
5855 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_vsi_config_tc()
5888 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5901 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5921 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed() local
5923 switch (pf->hw.phy.link_info.link_speed) { in i40e_get_link_speed()
5969 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit() local
5976 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5982 dev_warn(&pf->pdev->dev, in i40e_set_bw_limit()
5990 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, in i40e_set_bw_limit()
5993 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5996 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_set_bw_limit()
6011 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels() local
6055 &pf->cloud_filter_list, cloud_node) { in i40e_remove_queue_channels()
6067 last_aq_status = pf->hw.aq.asq_last_status; in i40e_remove_queue_channels()
6069 dev_info(&pf->pdev->dev, in i40e_remove_queue_channels()
6072 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_remove_queue_channels()
6112 * @pf: ptr to PF device
6121 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues, in i40e_validate_num_queues() argument
6132 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6138 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6153 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6173 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss() local
6175 struct i40e_hw *hw = &pf->hw; in i40e_vsi_reconfig_rss()
6192 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
6204 dev_info(&pf->pdev->dev, in i40e_vsi_reconfig_rss()
6223 * @pf: ptr to PF device
6229 static void i40e_channel_setup_queue_map(struct i40e_pf *pf, in i40e_channel_setup_queue_map() argument
6240 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); in i40e_channel_setup_queue_map()
6262 * @pf: ptr to PF device
6268 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, in i40e_add_channel() argument
6271 struct i40e_hw *hw = &pf->hw; in i40e_add_channel()
6277 dev_info(&pf->pdev->dev, in i40e_add_channel()
6290 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { in i40e_add_channel()
6298 i40e_channel_setup_queue_map(pf, &ctxt, ch); in i40e_add_channel()
6303 dev_info(&pf->pdev->dev, in i40e_add_channel()
6306 i40e_aq_str(&pf->hw, in i40e_add_channel()
6307 pf->hw.aq.asq_last_status)); in i40e_add_channel()
6361 * @pf: ptr to PF device
6368 static int i40e_channel_config_tx_ring(struct i40e_pf *pf, in i40e_channel_config_tx_ring() argument
6413 * @pf: ptr to PF device
6422 static inline int i40e_setup_hw_channel(struct i40e_pf *pf, in i40e_setup_hw_channel() argument
6434 ret = i40e_add_channel(pf, uplink_seid, ch); in i40e_setup_hw_channel()
6436 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6446 ret = i40e_channel_config_tx_ring(pf, vsi, ch); in i40e_setup_hw_channel()
6448 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6456 dev_dbg(&pf->pdev->dev, in i40e_setup_hw_channel()
6457 …"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base… in i40e_setup_hw_channel()
6466 * @pf: ptr to PF device
6473 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, in i40e_setup_channel() argument
6483 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", in i40e_setup_channel()
6489 seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_setup_channel()
6492 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); in i40e_setup_channel()
6494 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); in i40e_setup_channel()
6503 * @vsi: ptr to VSI which has PF backing
6511 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode() local
6512 struct i40e_hw *hw = &pf->hw; in i40e_validate_and_set_switch_mode()
6515 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); in i40e_validate_and_set_switch_mode()
6528 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6545 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, in i40e_validate_and_set_switch_mode()
6546 pf->last_sw_conf_valid_flags, in i40e_validate_and_set_switch_mode()
6549 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6569 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel() local
6577 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", in i40e_create_queue_channel()
6583 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6586 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", in i40e_create_queue_channel()
6595 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { in i40e_create_queue_channel()
6596 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_create_queue_channel()
6599 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_create_queue_channel()
6600 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_create_queue_channel()
6602 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); in i40e_create_queue_channel()
6613 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6623 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6630 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_create_queue_channel()
6631 dev_info(&pf->pdev->dev, "Failed to setup channel\n"); in i40e_create_queue_channel()
6635 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6647 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6730 struct i40e_pf *pf = veb->pf; in i40e_veb_config_tc() local
6747 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, in i40e_veb_config_tc()
6750 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6753 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6760 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6763 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6773 * @pf: PF struct
6775 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6779 static void i40e_dcb_reconfigure(struct i40e_pf *pf) in i40e_dcb_reconfigure() argument
6785 /* Enable the TCs available on PF to all VEBs */ in i40e_dcb_reconfigure()
6786 tc_map = i40e_pf_get_tc_map(pf); in i40e_dcb_reconfigure()
6791 if (!pf->veb[v]) in i40e_dcb_reconfigure()
6793 ret = i40e_veb_config_tc(pf->veb[v], tc_map); in i40e_dcb_reconfigure()
6795 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6797 pf->veb[v]->seid); in i40e_dcb_reconfigure()
6803 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_dcb_reconfigure()
6804 if (!pf->vsi[v]) in i40e_dcb_reconfigure()
6810 if (v == pf->lan_vsi) in i40e_dcb_reconfigure()
6811 tc_map = i40e_pf_get_tc_map(pf); in i40e_dcb_reconfigure()
6815 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); in i40e_dcb_reconfigure()
6817 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6819 pf->vsi[v]->seid); in i40e_dcb_reconfigure()
6823 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); in i40e_dcb_reconfigure()
6824 if (pf->vsi[v]->netdev) in i40e_dcb_reconfigure()
6825 i40e_dcbnl_set_all(pf->vsi[v]); in i40e_dcb_reconfigure()
6832 * @pf: PF struct
6834 * Resume a port's Tx and issue a PF reset in case of failure to
6837 static int i40e_resume_port_tx(struct i40e_pf *pf) in i40e_resume_port_tx() argument
6839 struct i40e_hw *hw = &pf->hw; in i40e_resume_port_tx()
6844 dev_info(&pf->pdev->dev, in i40e_resume_port_tx()
6847 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_resume_port_tx()
6848 /* Schedule PF reset to recover */ in i40e_resume_port_tx()
6849 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_resume_port_tx()
6850 i40e_service_event_schedule(pf); in i40e_resume_port_tx()
6858 * @pf: PF struct
6860 * Suspend a port's Tx and issue a PF reset in case of failure.
6862 static int i40e_suspend_port_tx(struct i40e_pf *pf) in i40e_suspend_port_tx() argument
6864 struct i40e_hw *hw = &pf->hw; in i40e_suspend_port_tx()
6867 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); in i40e_suspend_port_tx()
6869 dev_info(&pf->pdev->dev, in i40e_suspend_port_tx()
6872 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_suspend_port_tx()
6873 /* Schedule PF reset to recover */ in i40e_suspend_port_tx()
6874 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_suspend_port_tx()
6875 i40e_service_event_schedule(pf); in i40e_suspend_port_tx()
6883 * @pf: PF being configured
6887 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6889 static int i40e_hw_set_dcb_config(struct i40e_pf *pf, in i40e_hw_set_dcb_config() argument
6892 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config; in i40e_hw_set_dcb_config()
6897 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n"); in i40e_hw_set_dcb_config()
6902 i40e_pf_quiesce_all_vsi(pf); in i40e_hw_set_dcb_config()
6907 ret = i40e_set_dcb_config(&pf->hw); in i40e_hw_set_dcb_config()
6909 dev_info(&pf->pdev->dev, in i40e_hw_set_dcb_config()
6912 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_set_dcb_config()
6917 i40e_dcb_reconfigure(pf); in i40e_hw_set_dcb_config()
6920 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { in i40e_hw_set_dcb_config()
6922 ret = i40e_resume_port_tx(pf); in i40e_hw_set_dcb_config()
6926 i40e_pf_unquiesce_all_vsi(pf); in i40e_hw_set_dcb_config()
6934 * @pf: PF being configured
6938 * given PF
6940 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) in i40e_hw_dcb_config() argument
6948 struct i40e_hw *hw = &pf->hw; in i40e_hw_dcb_config()
6957 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n"); in i40e_hw_dcb_config()
6999 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg); in i40e_hw_dcb_config()
7007 set_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_hw_dcb_config()
7009 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_hw_dcb_config()
7011 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
7013 i40e_pf_quiesce_all_vsi(pf); in i40e_hw_dcb_config()
7014 ret = i40e_suspend_port_tx(pf); in i40e_hw_dcb_config()
7023 (hw, pf->mac_seid, &ets_data, in i40e_hw_dcb_config()
7026 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
7029 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
7049 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; in i40e_hw_dcb_config()
7056 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg); in i40e_hw_dcb_config()
7059 pf->pb_cfg = pb_cfg; in i40e_hw_dcb_config()
7062 ret = i40e_aq_dcb_updated(&pf->hw, NULL); in i40e_hw_dcb_config()
7064 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
7067 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
7075 i40e_dcb_reconfigure(pf); in i40e_hw_dcb_config()
7079 ret = i40e_resume_port_tx(pf); in i40e_hw_dcb_config()
7081 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
7086 /* Wait for the PF's queues to be disabled */ in i40e_hw_dcb_config()
7087 ret = i40e_pf_wait_queues_disabled(pf); in i40e_hw_dcb_config()
7089 /* Schedule PF reset to recover */ in i40e_hw_dcb_config()
7090 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_hw_dcb_config()
7091 i40e_service_event_schedule(pf); in i40e_hw_dcb_config()
7094 i40e_pf_unquiesce_all_vsi(pf); in i40e_hw_dcb_config()
7095 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_hw_dcb_config()
7096 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_hw_dcb_config()
7099 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) in i40e_hw_dcb_config()
7100 ret = i40e_hw_set_dcb_config(pf, new_cfg); in i40e_hw_dcb_config()
7109 * @pf: PF being queried
7113 int i40e_dcb_sw_default_config(struct i40e_pf *pf) in i40e_dcb_sw_default_config() argument
7115 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config; in i40e_dcb_sw_default_config()
7117 struct i40e_hw *hw = &pf->hw; in i40e_dcb_sw_default_config()
7120 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) { in i40e_dcb_sw_default_config()
7122 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); in i40e_dcb_sw_default_config()
7123 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
7124 pf->tmp_cfg.etscfg.maxtcs = 0; in i40e_dcb_sw_default_config()
7125 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7126 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; in i40e_dcb_sw_default_config()
7127 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING; in i40e_dcb_sw_default_config()
7128 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
7130 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS; in i40e_dcb_sw_default_config()
7131 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE; in i40e_dcb_sw_default_config()
7132 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO; in i40e_dcb_sw_default_config()
7133 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE; in i40e_dcb_sw_default_config()
7135 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg); in i40e_dcb_sw_default_config()
7145 (hw, pf->mac_seid, &ets_data, in i40e_dcb_sw_default_config()
7148 dev_info(&pf->pdev->dev, in i40e_dcb_sw_default_config()
7151 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_dcb_sw_default_config()
7168 * @pf: PF being configured
7173 static int i40e_init_pf_dcb(struct i40e_pf *pf) in i40e_init_pf_dcb() argument
7175 struct i40e_hw *hw = &pf->hw; in i40e_init_pf_dcb()
7181 if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) { in i40e_init_pf_dcb()
7182 dev_info(&pf->pdev->dev, "DCB is not supported.\n"); in i40e_init_pf_dcb()
7186 if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) { in i40e_init_pf_dcb()
7187 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); in i40e_init_pf_dcb()
7188 err = i40e_dcb_sw_default_config(pf); in i40e_init_pf_dcb()
7190 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n"); in i40e_init_pf_dcb()
7193 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n"); in i40e_init_pf_dcb()
7194 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_init_pf_dcb()
7197 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_init_pf_dcb()
7198 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_pf_dcb()
7206 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7207 "DCBX offload is not supported or is disabled for this PF.\n"); in i40e_init_pf_dcb()
7210 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | in i40e_init_pf_dcb()
7213 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_init_pf_dcb()
7218 set_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_pf_dcb()
7220 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_pf_dcb()
7221 dev_dbg(&pf->pdev->dev, in i40e_init_pf_dcb()
7222 "DCBX offload is supported for this PF.\n"); in i40e_init_pf_dcb()
7224 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_init_pf_dcb()
7225 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); in i40e_init_pf_dcb()
7226 set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags); in i40e_init_pf_dcb()
7228 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7231 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_init_pf_dcb()
7247 struct i40e_pf *pf = vsi->back; in i40e_print_link_message() local
7255 new_speed = pf->hw.phy.link_info.link_speed; in i40e_print_link_message()
7271 if (pf->hw.func_caps.npar_enable && in i40e_print_link_message()
7272 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || in i40e_print_link_message()
7273 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) in i40e_print_link_message()
7277 switch (pf->hw.phy.link_info.link_speed) { in i40e_print_link_message()
7306 switch (pf->hw.fc.current_mode) { in i40e_print_link_message()
7321 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { in i40e_print_link_message()
7326 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7329 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7332 else if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7350 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { in i40e_print_link_message()
7355 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7358 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7362 if (pf->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7383 struct i40e_pf *pf = vsi->back; in i40e_up_complete() local
7386 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_up_complete()
7400 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && in i40e_up_complete()
7410 pf->fd_add_err = 0; in i40e_up_complete()
7411 pf->fd_atr_cnt = 0; in i40e_up_complete()
7418 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_up_complete()
7419 i40e_service_event_schedule(pf); in i40e_up_complete()
7433 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked() local
7435 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_vsi_reinit_locked()
7440 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_vsi_reinit_locked()
7445 * @pf: board private structure
7448 static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) in i40e_force_link_state() argument
7453 struct i40e_hw *hw = &pf->hw; in i40e_force_link_state()
7467 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7479 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7490 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) in i40e_force_link_state()
7506 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) { in i40e_force_link_state()
7524 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7527 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_force_link_state()
7812 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up() local
7813 struct i40e_hw *hw = &pf->hw; in i40e_fwd_ring_up()
7865 dev_info(&pf->pdev->dev, in i40e_fwd_ring_up()
7885 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans() local
7886 struct i40e_hw *hw = &pf->hw; in i40e_setup_macvlans()
7922 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7938 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7958 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_setup_macvlans()
7972 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); in i40e_setup_macvlans()
7988 struct i40e_pf *pf = vsi->back; in i40e_fwd_add() local
7992 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_fwd_add()
7996 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_fwd_add()
8000 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { in i40e_fwd_add()
8012 /* reserve bit 0 for the pf device */ in i40e_fwd_add()
8019 vectors = pf->num_lan_msix; in i40e_fwd_add()
8021 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ in i40e_fwd_add()
8025 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ in i40e_fwd_add()
8029 /* allocate 1 Q per macvlan and 16 Qs to the PF*/ in i40e_fwd_add()
8033 /* allocate 1 Q per macvlan and 8 Qs to the PF */ in i40e_fwd_add()
8037 /* allocate 1 Q per macvlan and 1 Q to the PF */ in i40e_fwd_add()
8096 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans() local
8097 struct i40e_hw *hw = &pf->hw; in i40e_del_all_macvlans()
8133 struct i40e_pf *pf = vsi->back; in i40e_fwd_del() local
8134 struct i40e_hw *hw = &pf->hw; in i40e_fwd_del()
8154 dev_info(&pf->pdev->dev, in i40e_fwd_del()
8174 struct i40e_pf *pf = vsi->back; in i40e_setup_tc() local
8187 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); in i40e_setup_tc()
8193 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { in i40e_setup_tc()
8200 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); in i40e_setup_tc()
8203 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_setup_tc()
8210 if (num_tc > i40e_pf_get_num_tc(pf)) { in i40e_setup_tc()
8217 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_setup_tc()
8222 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_setup_tc()
8229 set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); in i40e_setup_tc()
8230 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_setup_tc()
8249 if (!hw && !i40e_is_tc_mqprio_enabled(pf)) in i40e_setup_tc()
8273 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_setup_tc()
8371 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter() local
8410 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8413 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8416 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8419 pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter()
8421 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8441 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf() local
8499 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8512 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8518 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8521 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8526 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8528 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter_big_buf()
8530 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8551 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower() local
8563 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n", in i40e_parse_cls_flower()
8603 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", in i40e_parse_cls_flower()
8613 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", in i40e_parse_cls_flower()
8631 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", in i40e_parse_cls_flower()
8655 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", in i40e_parse_cls_flower()
8665 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", in i40e_parse_cls_flower()
8672 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); in i40e_parse_cls_flower()
8689 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8711 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", in i40e_parse_cls_flower()
8721 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", in i40e_parse_cls_flower()
8735 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8790 struct i40e_pf *pf = vsi->back; in i40e_configure_clsflower() local
8799 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); in i40e_configure_clsflower()
8803 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_configure_clsflower()
8804 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_configure_clsflower()
8807 if (pf->fdir_pf_active_filters || in i40e_configure_clsflower()
8808 (!hlist_empty(&pf->fdir_filter_list))) { in i40e_configure_clsflower()
8842 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n", in i40e_configure_clsflower()
8850 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); in i40e_configure_clsflower()
8852 pf->num_cloud_filters++; in i40e_configure_clsflower()
8889 struct i40e_pf *pf = vsi->back; in i40e_delete_clsflower() local
8906 dev_err(&pf->pdev->dev, in i40e_delete_clsflower()
8909 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); in i40e_delete_clsflower()
8912 pf->num_cloud_filters--; in i40e_delete_clsflower()
8913 if (!pf->num_cloud_filters) in i40e_delete_clsflower()
8914 if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) && in i40e_delete_clsflower()
8915 !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) { in i40e_delete_clsflower()
8916 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_delete_clsflower()
8917 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags); in i40e_delete_clsflower()
8918 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_delete_clsflower()
8998 struct i40e_pf *pf = vsi->back; in i40e_open() local
9002 if (test_bit(__I40E_TESTING, pf->state) || in i40e_open()
9003 test_bit(__I40E_BAD_EEPROM, pf->state)) in i40e_open()
9008 if (i40e_force_link_state(pf, true)) in i40e_open()
9016 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
9018 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
9021 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); in i40e_open()
9060 struct i40e_pf *pf = vsi->back; in i40e_vsi_open() local
9078 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); in i40e_vsi_open()
9090 dev_driver_string(&pf->pdev->dev), in i40e_vsi_open()
9091 dev_name(&pf->pdev->dev)); in i40e_vsi_open()
9115 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_open()
9116 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_vsi_open()
9123 * @pf: Pointer to PF
9128 static void i40e_fdir_filter_exit(struct i40e_pf *pf) in i40e_fdir_filter_exit() argument
9135 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_exit()
9140 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { in i40e_fdir_filter_exit()
9144 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_fdir_filter_exit()
9146 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { in i40e_fdir_filter_exit()
9150 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_fdir_filter_exit()
9152 pf->fdir_pf_active_filters = 0; in i40e_fdir_filter_exit()
9153 i40e_reset_fdir_filter_cnt(pf); in i40e_fdir_filter_exit()
9156 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, in i40e_fdir_filter_exit()
9161 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP, in i40e_fdir_filter_exit()
9166 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, in i40e_fdir_filter_exit()
9171 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP, in i40e_fdir_filter_exit()
9176 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, in i40e_fdir_filter_exit()
9181 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, in i40e_fdir_filter_exit()
9186 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, in i40e_fdir_filter_exit()
9189 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, in i40e_fdir_filter_exit()
9193 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, in i40e_fdir_filter_exit()
9196 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6, in i40e_fdir_filter_exit()
9202 * @pf: Pointer to PF
9207 static void i40e_cloud_filter_exit(struct i40e_pf *pf) in i40e_cloud_filter_exit() argument
9213 &pf->cloud_filter_list, cloud_node) { in i40e_cloud_filter_exit()
9217 pf->num_cloud_filters = 0; in i40e_cloud_filter_exit()
9219 if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) && in i40e_cloud_filter_exit()
9220 !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) { in i40e_cloud_filter_exit()
9221 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_cloud_filter_exit()
9222 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags); in i40e_cloud_filter_exit()
9223 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_cloud_filter_exit()
9248 * i40e_do_reset - Start a PF or Core Reset sequence
9249 * @pf: board private structure
9254 * The essential difference in resets is that the PF Reset
9258 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) in i40e_do_reset() argument
9273 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); in i40e_do_reset()
9274 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
9276 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
9284 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); in i40e_do_reset()
9285 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
9287 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
9288 i40e_flush(&pf->hw); in i40e_do_reset()
9292 /* Request a PF Reset in i40e_do_reset()
9294 * Resets only the PF-specific registers in i40e_do_reset()
9300 dev_dbg(&pf->pdev->dev, "PFR requested\n"); in i40e_do_reset()
9301 i40e_handle_reset_warning(pf, lock_acquired); in i40e_do_reset()
9304 /* Request a PF Reset in i40e_do_reset()
9306 * Resets PF and reinitializes the PF's VSI. in i40e_do_reset()
9308 i40e_prep_for_reset(pf); in i40e_do_reset()
9309 i40e_reset_and_rebuild(pf, true, lock_acquired); in i40e_do_reset()
9310 dev_info(&pf->pdev->dev, in i40e_do_reset()
9311 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ? in i40e_do_reset()
9319 dev_info(&pf->pdev->dev, in i40e_do_reset()
9321 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9322 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
9327 i40e_vsi_reinit_locked(pf->vsi[v]); in i40e_do_reset()
9333 dev_info(&pf->pdev->dev, "VSI down requested\n"); in i40e_do_reset()
9334 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9335 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
9345 dev_info(&pf->pdev->dev, in i40e_do_reset()
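The i40e_do_reset() lines above dispatch on the requested reset type: a global or core reset only sets a trigger bit in GLGEN_RTRIG for the firmware to act on, while a PF reset is driven entirely by the driver (prep, reset, rebuild). A standalone sketch of that dispatch, with assumed bit positions that merely stand in for the real register layout:

/* Illustrative model of the reset dispatch in i40e_do_reset(). */
#include <stdint.h>
#include <stdio.h>

#define GLOBR_REQUESTED (1u << 0)  /* models I40E_GLOBAL_RESET_REQUESTED */
#define CORER_REQUESTED (1u << 1)  /* models I40E_CORE_RESET_REQUESTED   */
#define PFR_REQUESTED   (1u << 2)  /* models I40E_PF_RESET_REQUESTED     */

static uint32_t rtrig;             /* stand-in for the GLGEN_RTRIG register */

static void request_hw_reset(uint32_t trig_bit)
{
	rtrig |= trig_bit;         /* hardware clears it once the reset runs */
}

static void pf_reset_and_rebuild(void)
{
	puts("PFR: quiesce VSIs, reset PF registers, rebuild switch");
}

static void do_reset(uint32_t reset_flags)
{
	if (reset_flags & GLOBR_REQUESTED)
		request_hw_reset(1u << 1);      /* assumed GLOBR trigger bit */
	else if (reset_flags & CORER_REQUESTED)
		request_hw_reset(1u << 0);      /* assumed CORER trigger bit */
	else if (reset_flags & PFR_REQUESTED)
		pf_reset_and_rebuild();
	else
		puts("bad reset request");
}

int main(void)
{
	do_reset(PFR_REQUESTED);
	do_reset(CORER_REQUESTED);
	printf("rtrig = 0x%x\n", rtrig);
	return 0;
}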
9353 * @pf: board private structure
9357 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, in i40e_dcb_need_reconfig() argument
9372 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); in i40e_dcb_need_reconfig()
9378 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); in i40e_dcb_need_reconfig()
9383 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); in i40e_dcb_need_reconfig()
9391 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); in i40e_dcb_need_reconfig()
9399 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); in i40e_dcb_need_reconfig()
9402 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); in i40e_dcb_need_reconfig()
9408 * @pf: board private structure
9411 static int i40e_handle_lldp_event(struct i40e_pf *pf, in i40e_handle_lldp_event() argument
9416 struct i40e_hw *hw = &pf->hw; in i40e_handle_lldp_event()
9426 !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) in i40e_handle_lldp_event()
9428 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_handle_lldp_event()
9431 if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) in i40e_handle_lldp_event()
9437 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); in i40e_handle_lldp_event()
9443 dev_dbg(&pf->pdev->dev, in i40e_handle_lldp_event()
9459 ret = i40e_get_dcb_config(&pf->hw); in i40e_handle_lldp_event()
9465 dev_warn(&pf->pdev->dev, in i40e_handle_lldp_event()
9467 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_handle_lldp_event()
9469 dev_info(&pf->pdev->dev, in i40e_handle_lldp_event()
9472 i40e_aq_str(&pf->hw, in i40e_handle_lldp_event()
9473 pf->hw.aq.asq_last_status)); in i40e_handle_lldp_event()
9481 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); in i40e_handle_lldp_event()
9485 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, in i40e_handle_lldp_event()
9488 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); in i40e_handle_lldp_event()
9495 set_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_handle_lldp_event()
9497 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_handle_lldp_event()
9499 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9501 i40e_pf_quiesce_all_vsi(pf); in i40e_handle_lldp_event()
9504 i40e_dcb_reconfigure(pf); in i40e_handle_lldp_event()
9506 ret = i40e_resume_port_tx(pf); in i40e_handle_lldp_event()
9508 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9513 /* Wait for the PF's queues to be disabled */ in i40e_handle_lldp_event()
9514 ret = i40e_pf_wait_queues_disabled(pf); in i40e_handle_lldp_event()
9516 /* Schedule PF reset to recover */ in i40e_handle_lldp_event()
9517 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_handle_lldp_event()
9518 i40e_service_event_schedule(pf); in i40e_handle_lldp_event()
9520 i40e_pf_unquiesce_all_vsi(pf); in i40e_handle_lldp_event()
9521 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_handle_lldp_event()
9522 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_handle_lldp_event()
9532 * @pf: board private structure
9536 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) in i40e_do_reset_safe() argument
9539 i40e_do_reset(pf, reset_flags, true); in i40e_do_reset_safe()
9545 * @pf: board private structure
9548 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9551 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, in i40e_handle_lan_overflow_event() argument
9558 struct i40e_hw *hw = &pf->hw; in i40e_handle_lan_overflow_event()
9562 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", in i40e_handle_lan_overflow_event()
9572 vf = &pf->vf[vf_id]; in i40e_handle_lan_overflow_event()
9581 * @pf: board private structure
9583 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) in i40e_get_cur_guaranteed_fd_count() argument
9587 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_cur_guaranteed_fd_count()
9593 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9594 * @pf: board private structure
9596 u32 i40e_get_current_fd_count(struct i40e_pf *pf) in i40e_get_current_fd_count() argument
9600 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_current_fd_count()
9608 * @pf: board private structure
9610 u32 i40e_get_global_fd_count(struct i40e_pf *pf) in i40e_get_global_fd_count() argument
9614 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); in i40e_get_global_fd_count()
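The three count helpers above all read one 32-bit statistics register and decode packed fields from it. A standalone sketch of that mask-and-shift decoding follows; the field widths and shifts are illustrative assumptions, not the data-sheet layout of I40E_PFQF_FDSTAT or I40E_GLQF_FDCNT_0.

/* Illustrative model of decoding guaranteed/best-effort FD filter counts. */
#include <stdint.h>
#include <stdio.h>

#define FD_GUARANT_SHIFT 0
#define FD_GUARANT_MASK  (0x1fffu << FD_GUARANT_SHIFT)
#define FD_BESTCNT_SHIFT 16
#define FD_BESTCNT_MASK  (0x1fffu << FD_BESTCNT_SHIFT)

static uint32_t fd_guaranteed(uint32_t reg)
{
	return (reg & FD_GUARANT_MASK) >> FD_GUARANT_SHIFT;
}

static uint32_t fd_total(uint32_t reg)
{
	return fd_guaranteed(reg) +
	       ((reg & FD_BESTCNT_MASK) >> FD_BESTCNT_SHIFT);
}

int main(void)
{
	uint32_t reg = (7u << FD_BESTCNT_SHIFT) | 42u; /* fake register read */

	printf("guaranteed=%u total=%u\n", fd_guaranteed(reg), fd_total(reg));
	return 0;
}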
9622 * @pf: board private structure
9624 static void i40e_reenable_fdir_sb(struct i40e_pf *pf) in i40e_reenable_fdir_sb() argument
9626 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_reenable_fdir_sb()
9627 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && in i40e_reenable_fdir_sb()
9628 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_sb()
9629 …dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now… in i40e_reenable_fdir_sb()
9634 * @pf: board private structure
9636 static void i40e_reenable_fdir_atr(struct i40e_pf *pf) in i40e_reenable_fdir_atr() argument
9638 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { in i40e_reenable_fdir_atr()
9644 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, in i40e_reenable_fdir_atr()
9648 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_reenable_fdir_atr()
9649 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_atr()
9650 …dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no c… in i40e_reenable_fdir_atr()
9656 * @pf: board private structure
9659 static void i40e_delete_invalid_filter(struct i40e_pf *pf, in i40e_delete_invalid_filter() argument
9663 pf->fdir_pf_active_filters--; in i40e_delete_invalid_filter()
9664 pf->fd_inv = 0; in i40e_delete_invalid_filter()
9668 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9671 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9674 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9677 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9680 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9683 pf->fd_sctp6_filter_cnt--; in i40e_delete_invalid_filter()
9688 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9691 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9694 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9697 pf->fd_ip4_filter_cnt--; in i40e_delete_invalid_filter()
9704 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9707 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9710 pf->fd_sctp6_filter_cnt--; in i40e_delete_invalid_filter()
9713 pf->fd_ip6_filter_cnt--; in i40e_delete_invalid_filter()
9726 * @pf: board private structure
9728 void i40e_fdir_check_and_reenable(struct i40e_pf *pf) in i40e_fdir_check_and_reenable() argument
9734 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_check_and_reenable()
9738 fcnt_prog = i40e_get_global_fd_count(pf); in i40e_fdir_check_and_reenable()
9739 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fdir_check_and_reenable()
9741 (pf->fd_add_err == 0) || in i40e_fdir_check_and_reenable()
9742 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) in i40e_fdir_check_and_reenable()
9743 i40e_reenable_fdir_sb(pf); in i40e_fdir_check_and_reenable()
9750 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0) in i40e_fdir_check_and_reenable()
9751 i40e_reenable_fdir_atr(pf); in i40e_fdir_check_and_reenable()
9754 if (pf->fd_inv > 0) { in i40e_fdir_check_and_reenable()
9756 &pf->fdir_filter_list, fdir_node) in i40e_fdir_check_and_reenable()
9757 if (filter->fd_id == pf->fd_inv) in i40e_fdir_check_and_reenable()
9758 i40e_delete_invalid_filter(pf, filter); in i40e_fdir_check_and_reenable()
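i40e_fdir_check_and_reenable() above only lets the auto-disabled sideband and ATR paths come back once the filter table has head room again. A standalone sketch of that decision; the head-room constants mirror the idea of I40E_FDIR_BUFFER_HEAD_ROOM(_FOR_ATR) but are illustrative values, not the driver's.

/* Illustrative model of the re-enable checks for FD sideband and ATR. */
#include <stdbool.h>
#include <stdio.h>

#define FD_HEAD_ROOM      32
#define FD_HEAD_ROOM_ATR  (FD_HEAD_ROOM * 4)

static bool can_reenable_sideband(unsigned int programmed,
				  unsigned int capacity,
				  unsigned int add_errors)
{
	return programmed < capacity - FD_HEAD_ROOM || add_errors == 0;
}

static bool can_reenable_atr(unsigned int programmed, unsigned int capacity,
			     unsigned int tcp_filters)
{
	/* ATR needs more slack and must not fight explicit TCP rules. */
	return programmed < capacity - FD_HEAD_ROOM_ATR && tcp_filters == 0;
}

int main(void)
{
	unsigned int programmed = 100, capacity = 8192;

	printf("sideband: %d  atr: %d\n",
	       can_reenable_sideband(programmed, capacity, 3),
	       can_reenable_atr(programmed, capacity, 0));
	return 0;
}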
9766 * @pf: board private structure
9768 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) in i40e_fdir_flush_and_replay() argument
9776 if (!time_after(jiffies, pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9783 min_flush_time = pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9785 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; in i40e_fdir_flush_and_replay()
9789 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9790 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); in i40e_fdir_flush_and_replay()
9794 pf->fd_flush_timestamp = jiffies; in i40e_fdir_flush_and_replay()
9795 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9797 wr32(&pf->hw, I40E_PFQF_CTL_1, in i40e_fdir_flush_and_replay()
9799 i40e_flush(&pf->hw); in i40e_fdir_flush_and_replay()
9800 pf->fd_flush_cnt++; in i40e_fdir_flush_and_replay()
9801 pf->fd_add_err = 0; in i40e_fdir_flush_and_replay()
9805 reg = rd32(&pf->hw, I40E_PFQF_CTL_1); in i40e_fdir_flush_and_replay()
9810 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); in i40e_fdir_flush_and_replay()
9813 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); in i40e_fdir_flush_and_replay()
9814 if (!disable_atr && !pf->fd_tcp4_filter_cnt) in i40e_fdir_flush_and_replay()
9815 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9816 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fdir_flush_and_replay()
9817 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9818 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); in i40e_fdir_flush_and_replay()
9824 * @pf: board private structure
9826 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) in i40e_get_current_atr_cnt() argument
9828 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; in i40e_get_current_atr_cnt()
9833 * @pf: board private structure
9835 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) in i40e_fdir_reinit_subtask() argument
9839 if (test_bit(__I40E_DOWN, pf->state)) in i40e_fdir_reinit_subtask()
9842 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_reinit_subtask()
9843 i40e_fdir_flush_and_replay(pf); in i40e_fdir_reinit_subtask()
9845 i40e_fdir_check_and_reenable(pf); in i40e_fdir_reinit_subtask()
9891 struct i40e_pf *pf; in i40e_veb_link_event() local
9894 if (!veb || !veb->pf) in i40e_veb_link_event()
9896 pf = veb->pf; in i40e_veb_link_event()
9900 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9901 i40e_veb_link_event(pf->veb[i], link_up); in i40e_veb_link_event()
9904 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_veb_link_event()
9905 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9906 i40e_vsi_link_event(pf->vsi[i], link_up); in i40e_veb_link_event()
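i40e_veb_link_event() above propagates a link change down the switch tree: first into child VEBs, then into the VSIs attached to each element, so every netdev below the port sees the event. A simplified standalone model of that walk, using a flat node table rather than the driver's pf->veb[]/pf->vsi[] arrays:

/* Illustrative model of recursive link-event propagation over VEBs/VSIs. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

struct model_node {
	int  seid;          /* switch element id of this node            */
	int  uplink_seid;   /* seid of the VEB/port it hangs off          */
	bool is_veb;        /* true: bridge (recurse), false: VSI (leaf)  */
};

static struct model_node nodes[MAX_NODES] = {
	{ .seid = 1, .uplink_seid = 0, .is_veb = true  },   /* top VEB   */
	{ .seid = 2, .uplink_seid = 1, .is_veb = true  },   /* child VEB */
	{ .seid = 3, .uplink_seid = 1, .is_veb = false },   /* VSI       */
	{ .seid = 4, .uplink_seid = 2, .is_veb = false },   /* VSI       */
};

static void veb_link_event(int veb_seid, bool link_up)
{
	for (int i = 0; i < MAX_NODES; i++) {
		if (!nodes[i].seid || nodes[i].uplink_seid != veb_seid)
			continue;
		if (nodes[i].is_veb)
			veb_link_event(nodes[i].seid, link_up);
		else
			printf("VSI %d: link %s\n", nodes[i].seid,
			       link_up ? "up" : "down");
	}
}

int main(void)
{
	veb_link_event(1, true);
	return 0;
}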
9911 * @pf: board private structure
9913 static void i40e_link_event(struct i40e_pf *pf) in i40e_link_event() argument
9915 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_link_event()
9924 pf->hw.phy.get_link_info = true; in i40e_link_event()
9925 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); in i40e_link_event()
9926 status = i40e_get_link_status(&pf->hw, &new_link); in i40e_link_event()
9930 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9935 set_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9936 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", in i40e_link_event()
9941 old_link_speed = pf->hw.phy.link_info_old.link_speed; in i40e_link_event()
9942 new_link_speed = pf->hw.phy.link_info.link_speed; in i40e_link_event()
9955 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_link_event()
9956 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); in i40e_link_event()
9960 if (pf->vf) in i40e_link_event()
9961 i40e_vc_notify_link_state(pf); in i40e_link_event()
9963 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_link_event()
9964 i40e_ptp_set_increment(pf); in i40e_link_event()
9969 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) in i40e_link_event()
9976 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n"); in i40e_link_event()
9977 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); in i40e_link_event()
9978 err = i40e_dcb_sw_default_config(pf); in i40e_link_event()
9980 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_link_event()
9981 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_link_event()
9983 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_link_event()
9985 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_link_event()
9986 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_link_event()
9994 * @pf: board private structure
9996 static void i40e_watchdog_subtask(struct i40e_pf *pf) in i40e_watchdog_subtask() argument
10001 if (test_bit(__I40E_DOWN, pf->state) || in i40e_watchdog_subtask()
10002 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_watchdog_subtask()
10006 if (time_before(jiffies, (pf->service_timer_previous + in i40e_watchdog_subtask()
10007 pf->service_timer_period))) in i40e_watchdog_subtask()
10009 pf->service_timer_previous = jiffies; in i40e_watchdog_subtask()
10011 if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) || in i40e_watchdog_subtask()
10012 test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) in i40e_watchdog_subtask()
10013 i40e_link_event(pf); in i40e_watchdog_subtask()
10018 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_watchdog_subtask()
10019 if (pf->vsi[i] && pf->vsi[i]->netdev) in i40e_watchdog_subtask()
10020 i40e_update_stats(pf->vsi[i]); in i40e_watchdog_subtask()
10022 if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) { in i40e_watchdog_subtask()
10025 if (pf->veb[i]) in i40e_watchdog_subtask()
10026 i40e_update_veb_stats(pf->veb[i]); in i40e_watchdog_subtask()
10029 i40e_ptp_rx_hang(pf); in i40e_watchdog_subtask()
10030 i40e_ptp_tx_hang(pf); in i40e_watchdog_subtask()
10035 * @pf: board private structure
10037 static void i40e_reset_subtask(struct i40e_pf *pf) in i40e_reset_subtask() argument
10041 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { in i40e_reset_subtask()
10043 clear_bit(__I40E_REINIT_REQUESTED, pf->state); in i40e_reset_subtask()
10045 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10047 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10049 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10051 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10053 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10055 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10057 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { in i40e_reset_subtask()
10059 clear_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_reset_subtask()
10065 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { in i40e_reset_subtask()
10066 i40e_prep_for_reset(pf); in i40e_reset_subtask()
10067 i40e_reset(pf); in i40e_reset_subtask()
10068 i40e_rebuild(pf, false, false); in i40e_reset_subtask()
10073 !test_bit(__I40E_DOWN, pf->state) && in i40e_reset_subtask()
10074 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_reset_subtask()
10075 i40e_do_reset(pf, reset_flags, false); in i40e_reset_subtask()
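The i40e_reset_subtask() lines above gather every requested reset type out of the PF state with test-and-clear bitops, fold them into one reset_flags word, and make a single i40e_do_reset() call. A standalone sketch of that gather-then-dispatch pattern, with atomic bitops modelled by plain integers:

/* Illustrative model of accumulating requested resets into one flags word. */
#include <stdint.h>
#include <stdio.h>

enum {
	REINIT_REQUESTED = 0,
	PFR_REQUESTED,
	CORER_REQUESTED,
	GLOBR_REQUESTED,
};

static int test_and_clear(uint32_t *state, int bit)
{
	int was_set = !!(*state & (1u << bit));

	*state &= ~(1u << bit);
	return was_set;
}

int main(void)
{
	uint32_t pf_state = (1u << PFR_REQUESTED) | (1u << GLOBR_REQUESTED);
	uint32_t reset_flags = 0;

	if (test_and_clear(&pf_state, REINIT_REQUESTED))
		reset_flags |= 1u << REINIT_REQUESTED;
	if (test_and_clear(&pf_state, PFR_REQUESTED))
		reset_flags |= 1u << PFR_REQUESTED;
	if (test_and_clear(&pf_state, CORER_REQUESTED))
		reset_flags |= 1u << CORER_REQUESTED;
	if (test_and_clear(&pf_state, GLOBR_REQUESTED))
		reset_flags |= 1u << GLOBR_REQUESTED;

	if (reset_flags)
		printf("do_reset(flags=0x%x)\n", reset_flags);
	return 0;
}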
10081 * @pf: board private structure
10084 static void i40e_handle_link_event(struct i40e_pf *pf, in i40e_handle_link_event() argument
10096 i40e_link_event(pf); in i40e_handle_link_event()
10100 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10102 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10111 (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) { in i40e_handle_link_event()
10112 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10114 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10122 * @pf: board private structure
10124 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) in i40e_clean_adminq_subtask() argument
10127 struct i40e_hw *hw = &pf->hw; in i40e_clean_adminq_subtask()
10134 /* Do not run clean AQ when PF reset fails */ in i40e_clean_adminq_subtask()
10135 if (test_bit(__I40E_RESET_FAILED, pf->state)) in i40e_clean_adminq_subtask()
10139 val = rd32(&pf->hw, I40E_PF_ARQLEN); in i40e_clean_adminq_subtask()
10143 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); in i40e_clean_adminq_subtask()
10148 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
10150 pf->arq_overflows++; in i40e_clean_adminq_subtask()
10154 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
10158 wr32(&pf->hw, I40E_PF_ARQLEN, val); in i40e_clean_adminq_subtask()
10160 val = rd32(&pf->hw, I40E_PF_ATQLEN); in i40e_clean_adminq_subtask()
10163 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10164 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); in i40e_clean_adminq_subtask()
10168 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10169 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
10173 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10174 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
10178 wr32(&pf->hw, I40E_PF_ATQLEN, val); in i40e_clean_adminq_subtask()
10190 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); in i40e_clean_adminq_subtask()
10199 i40e_handle_link_event(pf, &event); in i40e_clean_adminq_subtask()
10203 ret = i40e_vc_process_vf_msg(pf, in i40e_clean_adminq_subtask()
10211 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); in i40e_clean_adminq_subtask()
10214 i40e_handle_lldp_event(pf, &event); in i40e_clean_adminq_subtask()
10219 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); in i40e_clean_adminq_subtask()
10220 i40e_handle_lan_overflow_event(pf, &event); in i40e_clean_adminq_subtask()
10223 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); in i40e_clean_adminq_subtask()
10228 i40e_debug(&pf->hw, I40E_DEBUG_NVM, in i40e_clean_adminq_subtask()
10233 dev_info(&pf->pdev->dev, in i40e_clean_adminq_subtask()
10241 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_clean_adminq_subtask()
10254 * @pf: board private structure
10256 static void i40e_verify_eeprom(struct i40e_pf *pf) in i40e_verify_eeprom() argument
10260 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
10263 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
10265 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", in i40e_verify_eeprom()
10267 set_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
10271 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_verify_eeprom()
10272 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); in i40e_verify_eeprom()
10273 clear_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
10279 * @pf: pointer to the PF structure
10283 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) in i40e_enable_pf_switch_lb() argument
10285 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_enable_pf_switch_lb()
10289 ctxt.seid = pf->main_vsi_seid; in i40e_enable_pf_switch_lb()
10290 ctxt.pf_num = pf->hw.pf_id; in i40e_enable_pf_switch_lb()
10292 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
10294 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
10295 "couldn't get PF vsi config, err %pe aq_err %s\n", in i40e_enable_pf_switch_lb()
10297 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
10306 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
10309 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
10315 * @pf: pointer to the PF structure
10319 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) in i40e_disable_pf_switch_lb() argument
10321 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_disable_pf_switch_lb()
10325 ctxt.seid = pf->main_vsi_seid; in i40e_disable_pf_switch_lb()
10326 ctxt.pf_num = pf->hw.pf_id; in i40e_disable_pf_switch_lb()
10328 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
10330 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
10331 "couldn't get PF vsi config, err %pe aq_err %s\n", in i40e_disable_pf_switch_lb()
10333 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
10342 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
10345 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
10359 struct i40e_pf *pf = veb->pf; in i40e_config_bridge_mode() local
10361 if (pf->hw.debug_mask & I40E_DEBUG_LAN) in i40e_config_bridge_mode()
10362 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", in i40e_config_bridge_mode()
10365 i40e_disable_pf_switch_lb(pf); in i40e_config_bridge_mode()
10367 i40e_enable_pf_switch_lb(pf); in i40e_config_bridge_mode()
10382 struct i40e_pf *pf = veb->pf; in i40e_reconstitute_veb() local
10387 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { in i40e_reconstitute_veb()
10388 if (pf->vsi[v] && in i40e_reconstitute_veb()
10389 pf->vsi[v]->veb_idx == veb->idx && in i40e_reconstitute_veb()
10390 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_reconstitute_veb()
10391 ctl_vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10396 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10401 if (ctl_vsi != pf->vsi[pf->lan_vsi]) in i40e_reconstitute_veb()
10402 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_reconstitute_veb()
10405 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10417 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) in i40e_reconstitute_veb()
10424 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_reconstitute_veb()
10425 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) in i40e_reconstitute_veb()
10428 if (pf->vsi[v]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10429 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10434 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10445 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10446 pf->veb[veb_idx]->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10447 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); in i40e_reconstitute_veb()
10459 * @pf: the PF struct
10462 static int i40e_get_capabilities(struct i40e_pf *pf, in i40e_get_capabilities() argument
10477 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, in i40e_get_capabilities()
10483 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { in i40e_get_capabilities()
10486 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { in i40e_get_capabilities()
10487 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10490 i40e_aq_str(&pf->hw, in i40e_get_capabilities()
10491 pf->hw.aq.asq_last_status)); in i40e_get_capabilities()
10496 if (pf->hw.debug_mask & I40E_DEBUG_USER) { in i40e_get_capabilities()
10498 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10499 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", in i40e_get_capabilities()
10500 pf->hw.pf_id, pf->hw.func_caps.num_vfs, in i40e_get_capabilities()
10501 pf->hw.func_caps.num_msix_vectors, in i40e_get_capabilities()
10502 pf->hw.func_caps.num_msix_vectors_vf, in i40e_get_capabilities()
10503 pf->hw.func_caps.fd_filters_guaranteed, in i40e_get_capabilities()
10504 pf->hw.func_caps.fd_filters_best_effort, in i40e_get_capabilities()
10505 pf->hw.func_caps.num_tx_qp, in i40e_get_capabilities()
10506 pf->hw.func_caps.num_vsis); in i40e_get_capabilities()
10508 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10510 pf->hw.dev_caps.switch_mode, in i40e_get_capabilities()
10511 pf->hw.dev_caps.valid_functions); in i40e_get_capabilities()
10512 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10514 pf->hw.dev_caps.sr_iov_1_1, in i40e_get_capabilities()
10515 pf->hw.dev_caps.num_vfs); in i40e_get_capabilities()
10516 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10518 pf->hw.dev_caps.num_vsis, in i40e_get_capabilities()
10519 pf->hw.dev_caps.num_rx_qp, in i40e_get_capabilities()
10520 pf->hw.dev_caps.num_tx_qp); in i40e_get_capabilities()
10524 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ in i40e_get_capabilities()
10525 + pf->hw.func_caps.num_vfs) in i40e_get_capabilities()
10526 if (pf->hw.revision_id == 0 && in i40e_get_capabilities()
10527 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { in i40e_get_capabilities()
10528 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10530 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); in i40e_get_capabilities()
10531 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; in i40e_get_capabilities()
10541 * @pf: board private structure
10543 static void i40e_fdir_sb_setup(struct i40e_pf *pf) in i40e_fdir_sb_setup() argument
10550 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { in i40e_fdir_sb_setup()
10559 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); in i40e_fdir_sb_setup()
10562 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) in i40e_fdir_sb_setup()
10566 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_sb_setup()
10570 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, in i40e_fdir_sb_setup()
10571 pf->vsi[pf->lan_vsi]->seid, 0); in i40e_fdir_sb_setup()
10573 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); in i40e_fdir_sb_setup()
10574 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_fdir_sb_setup()
10575 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_fdir_sb_setup()
10585 * @pf: board private structure
10587 static void i40e_fdir_teardown(struct i40e_pf *pf) in i40e_fdir_teardown() argument
10591 i40e_fdir_filter_exit(pf); in i40e_fdir_teardown()
10592 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_teardown()
10599 * @vsi: PF main vsi
10608 struct i40e_pf *pf = vsi->back; in i40e_rebuild_cloud_filters() local
10613 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, in i40e_rebuild_cloud_filters()
10625 dev_dbg(&pf->pdev->dev, in i40e_rebuild_cloud_filters()
10628 i40e_aq_str(&pf->hw, in i40e_rebuild_cloud_filters()
10629 pf->hw.aq.asq_last_status)); in i40e_rebuild_cloud_filters()
10638 * @vsi: PF main vsi
10714 * @pf: board private structure
10716 * Close up the VFs and other things in prep for PF Reset.
10718 static void i40e_prep_for_reset(struct i40e_pf *pf) in i40e_prep_for_reset() argument
10720 struct i40e_hw *hw = &pf->hw; in i40e_prep_for_reset()
10724 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_prep_for_reset()
10725 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_prep_for_reset()
10727 if (i40e_check_asq_alive(&pf->hw)) in i40e_prep_for_reset()
10728 i40e_vc_notify_reset(pf); in i40e_prep_for_reset()
10730 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); in i40e_prep_for_reset()
10733 i40e_pf_quiesce_all_vsi(pf); in i40e_prep_for_reset()
10735 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_prep_for_reset()
10736 if (pf->vsi[v]) { in i40e_prep_for_reset()
10737 i40e_clean_xps_state(pf->vsi[v]); in i40e_prep_for_reset()
10738 pf->vsi[v]->seid = 0; in i40e_prep_for_reset()
10742 i40e_shutdown_adminq(&pf->hw); in i40e_prep_for_reset()
10748 dev_warn(&pf->pdev->dev, in i40e_prep_for_reset()
10755 i40e_ptp_save_hw_time(pf); in i40e_prep_for_reset()
10760 * @pf: PF struct
10762 static void i40e_send_version(struct i40e_pf *pf) in i40e_send_version() argument
10771 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); in i40e_send_version()
10822 * i40e_reset - wait for core reset to finish, then reset the PF if a CoreR was not seen
10823 * @pf: board private structure
10825 static int i40e_reset(struct i40e_pf *pf) in i40e_reset() argument
10827 struct i40e_hw *hw = &pf->hw; in i40e_reset()
10832 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); in i40e_reset()
10833 set_bit(__I40E_RESET_FAILED, pf->state); in i40e_reset()
10834 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_reset()
10836 pf->pfr_count++; in i40e_reset()
10843 * @pf: board private structure
10848 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) in i40e_rebuild() argument
10850 const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); in i40e_rebuild()
10851 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_rebuild()
10852 struct i40e_hw *hw = &pf->hw; in i40e_rebuild()
10857 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && in i40e_rebuild()
10859 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); in i40e_rebuild()
10861 if (test_bit(__I40E_DOWN, pf->state) && in i40e_rebuild()
10862 !test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_rebuild()
10864 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); in i40e_rebuild()
10867 ret = i40e_init_adminq(&pf->hw); in i40e_rebuild()
10869 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n", in i40e_rebuild()
10871 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10874 i40e_get_oem_version(&pf->hw); in i40e_rebuild()
10876 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { in i40e_rebuild()
10882 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) in i40e_rebuild()
10883 i40e_verify_eeprom(pf); in i40e_rebuild()
10889 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_rebuild()
10890 if (i40e_get_capabilities(pf, in i40e_rebuild()
10898 if (i40e_setup_misc_vector_for_recovery_mode(pf)) in i40e_rebuild()
10907 free_irq(pf->pdev->irq, pf); in i40e_rebuild()
10908 i40e_clear_interrupt_scheme(pf); in i40e_rebuild()
10909 if (i40e_restore_interrupt_scheme(pf)) in i40e_rebuild()
10914 i40e_send_version(pf); in i40e_rebuild()
10923 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); in i40e_rebuild()
10930 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10935 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10944 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_rebuild()
10951 dev_warn(&pf->pdev->dev, in i40e_rebuild()
10953 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_rebuild()
10956 ret = i40e_init_pf_dcb(pf); in i40e_rebuild()
10958 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", in i40e_rebuild()
10960 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_rebuild()
10969 ret = i40e_setup_pf_switch(pf, reinit, true); in i40e_rebuild()
10976 ret = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_rebuild()
10981 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", in i40e_rebuild()
10983 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10990 * to recover minimal use by getting the basic PF VSI working. in i40e_rebuild()
10992 if (vsi->uplink_seid != pf->mac_seid) { in i40e_rebuild()
10993 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); in i40e_rebuild()
10996 if (!pf->veb[v]) in i40e_rebuild()
10999 if (pf->veb[v]->uplink_seid == pf->mac_seid || in i40e_rebuild()
11000 pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
11001 ret = i40e_reconstitute_veb(pf->veb[v]); in i40e_rebuild()
11008 * for minimal rebuild of PF VSI. in i40e_rebuild()
11012 if (pf->veb[v]->uplink_seid == pf->mac_seid) { in i40e_rebuild()
11013 dev_info(&pf->pdev->dev, in i40e_rebuild()
11014 "rebuild of switch failed: %d, will try to set up simple PF connection\n", in i40e_rebuild()
11016 vsi->uplink_seid = pf->mac_seid; in i40e_rebuild()
11018 } else if (pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
11019 dev_info(&pf->pdev->dev, in i40e_rebuild()
11027 if (vsi->uplink_seid == pf->mac_seid) { in i40e_rebuild()
11028 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); in i40e_rebuild()
11032 dev_info(&pf->pdev->dev, in i40e_rebuild()
11060 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs in i40e_rebuild()
11081 if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { in i40e_rebuild()
11083 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_rebuild()
11085 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", in i40e_rebuild()
11087 i40e_aq_str(&pf->hw, in i40e_rebuild()
11088 pf->hw.aq.asq_last_status)); in i40e_rebuild()
11091 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_rebuild()
11092 ret = i40e_setup_misc_vector(pf); in i40e_rebuild()
11100 * PF/VF VSIs. in i40e_rebuild()
11103 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_rebuild()
11104 pf->main_vsi_seid); in i40e_rebuild()
11107 i40e_pf_unquiesce_all_vsi(pf); in i40e_rebuild()
11114 ret = i40e_set_promiscuous(pf, pf->cur_promisc); in i40e_rebuild()
11116 dev_warn(&pf->pdev->dev, in i40e_rebuild()
11118 pf->cur_promisc ? "on" : "off", in i40e_rebuild()
11120 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
11122 i40e_reset_all_vfs(pf, true); in i40e_rebuild()
11125 i40e_send_version(pf); in i40e_rebuild()
11134 clear_bit(__I40E_RESET_FAILED, pf->state); in i40e_rebuild()
11136 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_rebuild()
11137 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); in i40e_rebuild()
11142 * @pf: board private structure
11147 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, in i40e_reset_and_rebuild() argument
11152 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_reset_and_rebuild()
11158 ret = i40e_reset(pf); in i40e_reset_and_rebuild()
11160 i40e_rebuild(pf, reinit, lock_acquired); in i40e_reset_and_rebuild()
11164 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
11165 * @pf: board private structure
11172 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) in i40e_handle_reset_warning() argument
11174 i40e_prep_for_reset(pf); in i40e_handle_reset_warning()
11175 i40e_reset_and_rebuild(pf, false, lock_acquired); in i40e_handle_reset_warning()
11180 * @pf: pointer to the PF structure
11184 static void i40e_handle_mdd_event(struct i40e_pf *pf) in i40e_handle_mdd_event() argument
11186 struct i40e_hw *hw = &pf->hw; in i40e_handle_mdd_event()
11192 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) in i40e_handle_mdd_event()
11202 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
11203 if (netif_msg_tx_err(pf)) in i40e_handle_mdd_event()
11204 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x … in i40e_handle_mdd_event()
11214 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
11215 if (netif_msg_rx_err(pf)) in i40e_handle_mdd_event()
11216 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02… in i40e_handle_mdd_event()
11226 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); in i40e_handle_mdd_event()
11231 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n"); in i40e_handle_mdd_event()
11236 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { in i40e_handle_mdd_event()
11237 vf = &(pf->vf[i]); in i40e_handle_mdd_event()
11242 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
11244 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
11245 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
11253 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
11255 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
11256 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
11262 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_handle_mdd_event()
11275 struct i40e_pf *pf = container_of(work, in i40e_service_task() local
11281 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_service_task()
11282 test_bit(__I40E_SUSPENDED, pf->state)) in i40e_service_task()
11285 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) in i40e_service_task()
11288 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_service_task()
11289 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); in i40e_service_task()
11290 i40e_sync_filters_subtask(pf); in i40e_service_task()
11291 i40e_reset_subtask(pf); in i40e_service_task()
11292 i40e_handle_mdd_event(pf); in i40e_service_task()
11293 i40e_vc_process_vflr_event(pf); in i40e_service_task()
11294 i40e_watchdog_subtask(pf); in i40e_service_task()
11295 i40e_fdir_reinit_subtask(pf); in i40e_service_task()
11296 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { in i40e_service_task()
11298 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], in i40e_service_task()
11301 i40e_client_subtask(pf); in i40e_service_task()
11303 pf->state)) in i40e_service_task()
11305 pf->vsi[pf->lan_vsi]); in i40e_service_task()
11307 i40e_sync_filters_subtask(pf); in i40e_service_task()
11309 i40e_reset_subtask(pf); in i40e_service_task()
11312 i40e_clean_adminq_subtask(pf); in i40e_service_task()
11316 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_service_task()
11322 if (time_after(jiffies, (start_time + pf->service_timer_period)) || in i40e_service_task()
11323 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || in i40e_service_task()
11324 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || in i40e_service_task()
11325 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) in i40e_service_task()
11326 i40e_service_event_schedule(pf); in i40e_service_task()
11335 struct i40e_pf *pf = from_timer(pf, t, service_timer); in i40e_service_timer() local
11337 mod_timer(&pf->service_timer, in i40e_service_timer()
11338 round_jiffies(jiffies + pf->service_timer_period)); in i40e_service_timer()
11339 i40e_service_event_schedule(pf); in i40e_service_timer()
11348 struct i40e_pf *pf = vsi->back; in i40e_set_num_rings_in_vsi() local
11352 vsi->alloc_queue_pairs = pf->num_lan_qps; in i40e_set_num_rings_in_vsi()
11359 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_set_num_rings_in_vsi()
11360 vsi->num_q_vectors = pf->num_lan_msix; in i40e_set_num_rings_in_vsi()
11372 vsi->num_q_vectors = pf->num_fdsb_msix; in i40e_set_num_rings_in_vsi()
11376 vsi->alloc_queue_pairs = pf->num_vmdq_qps; in i40e_set_num_rings_in_vsi()
11383 vsi->num_q_vectors = pf->num_vmdq_msix; in i40e_set_num_rings_in_vsi()
11387 vsi->alloc_queue_pairs = pf->num_vf_qps; in i40e_set_num_rings_in_vsi()
11453 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11454 * @pf: board private structure
11458 * On success: returns vsi index in PF (positive)
11460 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) in i40e_vsi_mem_alloc() argument
11467 /* Need to protect the allocation of the VSIs at the PF level */ in i40e_vsi_mem_alloc()
11468 mutex_lock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
11476 i = pf->next_vsi; in i40e_vsi_mem_alloc()
11477 while (i < pf->num_alloc_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11479 if (i >= pf->num_alloc_vsi) { in i40e_vsi_mem_alloc()
11481 while (i < pf->next_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11485 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { in i40e_vsi_mem_alloc()
11491 pf->next_vsi = ++i; in i40e_vsi_mem_alloc()
11499 vsi->back = pf; in i40e_vsi_mem_alloc()
11505 pf->rss_table_size : 64; in i40e_vsi_mem_alloc()
11512 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); in i40e_vsi_mem_alloc()
11530 pf->vsi[vsi_idx] = vsi; in i40e_vsi_mem_alloc()
11536 pf->next_vsi = i - 1; in i40e_vsi_mem_alloc()
11539 mutex_unlock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
11587 struct i40e_pf *pf; in i40e_vsi_clear() local
11594 pf = vsi->back; in i40e_vsi_clear()
11596 mutex_lock(&pf->switch_mutex); in i40e_vsi_clear()
11597 if (!pf->vsi[vsi->idx]) { in i40e_vsi_clear()
11598 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", in i40e_vsi_clear()
11603 if (pf->vsi[vsi->idx] != vsi) { in i40e_vsi_clear()
11604 dev_err(&pf->pdev->dev, in i40e_vsi_clear()
11605 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", in i40e_vsi_clear()
11606 pf->vsi[vsi->idx]->idx, in i40e_vsi_clear()
11607 pf->vsi[vsi->idx]->type, in i40e_vsi_clear()
11612 /* updates the PF for this cleared vsi */ in i40e_vsi_clear()
11613 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_clear()
11614 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); in i40e_vsi_clear()
11620 pf->vsi[vsi->idx] = NULL; in i40e_vsi_clear()
11621 if (vsi->idx < pf->next_vsi) in i40e_vsi_clear()
11622 pf->next_vsi = vsi->idx; in i40e_vsi_clear()
11625 mutex_unlock(&pf->switch_mutex); in i40e_vsi_clear()
11658 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings() local
11673 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11679 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11690 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11697 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11706 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11710 ring->itr_setting = pf->rx_itr_default; in i40e_alloc_rings()
11723 * @pf: board private structure
11728 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) in i40e_reserve_msix_vectors() argument
11730 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, in i40e_reserve_msix_vectors()
11733 dev_info(&pf->pdev->dev, in i40e_reserve_msix_vectors()
11743 * @pf: board private structure
11749 static int i40e_init_msix(struct i40e_pf *pf) in i40e_init_msix() argument
11751 struct i40e_hw *hw = &pf->hw; in i40e_init_msix()
11758 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_init_msix()
11785 /* reserve some vectors for the main PF traffic queues. Initially we in i40e_init_msix()
11793 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); in i40e_init_msix()
11794 vectors_left -= pf->num_lan_msix; in i40e_init_msix()
11797 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_init_msix()
11799 pf->num_fdsb_msix = 1; in i40e_init_msix()
11803 pf->num_fdsb_msix = 0; in i40e_init_msix()
11808 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_init_msix()
11809 iwarp_requested = pf->num_iwarp_msix; in i40e_init_msix()
11812 pf->num_iwarp_msix = 0; in i40e_init_msix()
11813 else if (vectors_left < pf->num_iwarp_msix) in i40e_init_msix()
11814 pf->num_iwarp_msix = 1; in i40e_init_msix()
11815 v_budget += pf->num_iwarp_msix; in i40e_init_msix()
11816 vectors_left -= pf->num_iwarp_msix; in i40e_init_msix()
11820 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) { in i40e_init_msix()
11822 pf->num_vmdq_msix = 0; in i40e_init_msix()
11823 pf->num_vmdq_qps = 0; in i40e_init_msix()
11826 pf->num_vmdq_vsis * pf->num_vmdq_qps; in i40e_init_msix()
11833 * queues/vectors used by the PF later with the ethtool in i40e_init_msix()
11837 pf->num_vmdq_qps = 1; in i40e_init_msix()
11838 vmdq_vecs_wanted = pf->num_vmdq_vsis; in i40e_init_msix()
11843 pf->num_vmdq_msix = pf->num_vmdq_qps; in i40e_init_msix()
11859 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); in i40e_init_msix()
11860 pf->num_lan_msix += extra_vectors; in i40e_init_msix()
11866 v_budget += pf->num_lan_msix; in i40e_init_msix()
11867 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), in i40e_init_msix()
11869 if (!pf->msix_entries) in i40e_init_msix()
11873 pf->msix_entries[i].entry = i; in i40e_init_msix()
11874 v_actual = i40e_reserve_msix_vectors(pf, v_budget); in i40e_init_msix()
11877 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_init_msix()
11878 kfree(pf->msix_entries); in i40e_init_msix()
11879 pf->msix_entries = NULL; in i40e_init_msix()
11880 pci_disable_msix(pf->pdev); in i40e_init_msix()
11885 pf->num_vmdq_vsis = 0; in i40e_init_msix()
11886 pf->num_vmdq_qps = 0; in i40e_init_msix()
11887 pf->num_lan_qps = 1; in i40e_init_msix()
11888 pf->num_lan_msix = 1; in i40e_init_msix()
11898 dev_info(&pf->pdev->dev, in i40e_init_msix()
11905 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ in i40e_init_msix()
11906 pf->num_vmdq_vsis = 1; in i40e_init_msix()
11907 pf->num_vmdq_qps = 1; in i40e_init_msix()
11912 pf->num_lan_msix = 1; in i40e_init_msix()
11915 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_init_msix()
11916 pf->num_lan_msix = 1; in i40e_init_msix()
11917 pf->num_iwarp_msix = 1; in i40e_init_msix()
11919 pf->num_lan_msix = 2; in i40e_init_msix()
11923 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_init_msix()
11924 pf->num_iwarp_msix = min_t(int, (vec / 3), in i40e_init_msix()
11926 pf->num_vmdq_vsis = min_t(int, (vec / 3), in i40e_init_msix()
11929 pf->num_vmdq_vsis = min_t(int, (vec / 2), in i40e_init_msix()
11932 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_init_msix()
11933 pf->num_fdsb_msix = 1; in i40e_init_msix()
11936 pf->num_lan_msix = min_t(int, in i40e_init_msix()
11937 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), in i40e_init_msix()
11938 pf->num_lan_msix); in i40e_init_msix()
11939 pf->num_lan_qps = pf->num_lan_msix; in i40e_init_msix()
11944 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) { in i40e_init_msix()
11945 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11946 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_init_msix()
11947 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_init_msix()
11949 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) { in i40e_init_msix()
11950 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11951 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_init_msix()
11954 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && in i40e_init_msix()
11955 pf->num_iwarp_msix == 0) { in i40e_init_msix()
11956 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11957 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_init_msix()
11959 i40e_debug(&pf->hw, I40E_DEBUG_INIT, in i40e_init_msix()
11960 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", in i40e_init_msix()
11961 pf->num_lan_msix, in i40e_init_msix()
11962 pf->num_vmdq_msix * pf->num_vmdq_vsis, in i40e_init_msix()
11963 pf->num_fdsb_msix, in i40e_init_msix()
11964 pf->num_iwarp_msix); in i40e_init_msix()
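The i40e_init_msix() lines above budget the MSI-X vectors: one "misc" vector is reserved for the admin queue and other causes, the LAN VSI gets up to one vector per CPU but at most half of what is left, a single vector goes to FD sideband, and VMDq/iWARP are capped by the remainder. A standalone sketch of that arithmetic follows; the numbers are illustrative, and the real function also re-balances when the PCI core grants fewer vectors than requested.

/* Illustrative model of MSI-X vector budgeting (iWARP carve-out omitted). */
#include <stdio.h>

struct msix_budget {
	int misc, lan, fdsb, vmdq;
};

static struct msix_budget budget_vectors(int hw_vectors, int cpus,
					 int vmdq_vsis)
{
	struct msix_budget b = { 0 };
	int left = hw_vectors;

	b.misc = 1;                                   /* adminq/other causes */
	left -= b.misc;

	b.lan = (cpus < left / 2) ? cpus : left / 2;  /* min(cpus, left/2)   */
	left -= b.lan;

	if (left >= 1) {                              /* sideband flow dir.  */
		b.fdsb = 1;
		left -= 1;
	}

	b.vmdq = (vmdq_vsis < left) ? vmdq_vsis : left;
	return b;
}

int main(void)
{
	struct msix_budget b = budget_vectors(64, 16, 8);

	printf("misc=%d lan=%d fdsb=%d vmdq=%d\n",
	       b.misc, b.lan, b.fdsb, b.vmdq);
	return 0;
}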
12007 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors() local
12011 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_alloc_q_vectors()
12013 else if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_alloc_q_vectors()
12035 * @pf: board private structure to initialize
12037 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) in i40e_init_interrupt_scheme() argument
12042 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_init_interrupt_scheme()
12043 vectors = i40e_init_msix(pf); in i40e_init_interrupt_scheme()
12045 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_init_interrupt_scheme()
12046 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_init_interrupt_scheme()
12047 clear_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_init_interrupt_scheme()
12048 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_init_interrupt_scheme()
12049 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_interrupt_scheme()
12050 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_init_interrupt_scheme()
12051 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_init_interrupt_scheme()
12052 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_init_interrupt_scheme()
12053 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_init_interrupt_scheme()
12054 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_init_interrupt_scheme()
12057 i40e_determine_queue_usage(pf); in i40e_init_interrupt_scheme()
12061 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && in i40e_init_interrupt_scheme()
12062 test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { in i40e_init_interrupt_scheme()
12063 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); in i40e_init_interrupt_scheme()
12064 vectors = pci_enable_msi(pf->pdev); in i40e_init_interrupt_scheme()
12066 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", in i40e_init_interrupt_scheme()
12068 clear_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_init_interrupt_scheme()
12073 if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) && in i40e_init_interrupt_scheme()
12074 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_init_interrupt_scheme()
12075 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); in i40e_init_interrupt_scheme()
12079 pf->irq_pile = kzalloc(size, GFP_KERNEL); in i40e_init_interrupt_scheme()
12080 if (!pf->irq_pile) in i40e_init_interrupt_scheme()
12083 pf->irq_pile->num_entries = vectors; in i40e_init_interrupt_scheme()
12086 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); in i40e_init_interrupt_scheme()
12093 * @pf: private board data structure
12099 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) in i40e_restore_interrupt_scheme() argument
12107 set_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_restore_interrupt_scheme()
12108 set_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_restore_interrupt_scheme()
12110 err = i40e_init_interrupt_scheme(pf); in i40e_restore_interrupt_scheme()
12117 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_restore_interrupt_scheme()
12118 if (pf->vsi[i]) { in i40e_restore_interrupt_scheme()
12119 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12122 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12126 err = i40e_setup_misc_vector(pf); in i40e_restore_interrupt_scheme()
12130 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) in i40e_restore_interrupt_scheme()
12131 i40e_client_update_msix_info(pf); in i40e_restore_interrupt_scheme()
12137 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
12138 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12147 * @pf: board private structure
12154 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) in i40e_setup_misc_vector_for_recovery_mode() argument
12158 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_setup_misc_vector_for_recovery_mode()
12159 err = i40e_setup_misc_vector(pf); in i40e_setup_misc_vector_for_recovery_mode()
12162 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
12168 u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED; in i40e_setup_misc_vector_for_recovery_mode()
12170 err = request_irq(pf->pdev->irq, i40e_intr, flags, in i40e_setup_misc_vector_for_recovery_mode()
12171 pf->int_name, pf); in i40e_setup_misc_vector_for_recovery_mode()
12174 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
12179 i40e_enable_misc_int_causes(pf); in i40e_setup_misc_vector_for_recovery_mode()
12180 i40e_irq_dynamic_enable_icr0(pf); in i40e_setup_misc_vector_for_recovery_mode()
12188 * @pf: board private structure
12194 static int i40e_setup_misc_vector(struct i40e_pf *pf) in i40e_setup_misc_vector() argument
12196 struct i40e_hw *hw = &pf->hw; in i40e_setup_misc_vector()
12200 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { in i40e_setup_misc_vector()
12201 err = request_irq(pf->msix_entries[0].vector, in i40e_setup_misc_vector()
12202 i40e_intr, 0, pf->int_name, pf); in i40e_setup_misc_vector()
12204 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_setup_misc_vector()
12205 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector()
12207 pf->int_name, err); in i40e_setup_misc_vector()
12212 i40e_enable_misc_int_causes(pf); in i40e_setup_misc_vector()
12220 i40e_irq_dynamic_enable_icr0(pf); in i40e_setup_misc_vector()
12237 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq() local
12238 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_aq()
12245 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12248 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12249 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12259 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12262 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12263 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12283 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg() local
12284 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_reg()
12299 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); in i40e_config_rss_reg()
12317 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_config_rss_reg()
12337 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg() local
12338 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_reg()
12370 struct i40e_pf *pf = vsi->back; in i40e_config_rss() local
12372 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) in i40e_config_rss()
12389 struct i40e_pf *pf = vsi->back; in i40e_get_rss() local
12391 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) in i40e_get_rss()
12399 * @pf: Pointer to board private structure
12404 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, in i40e_fill_rss_lut() argument
12415 * @pf: board private structure
12417 static int i40e_pf_config_rss(struct i40e_pf *pf) in i40e_pf_config_rss() argument
12419 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_pf_config_rss()
12422 struct i40e_hw *hw = &pf->hw; in i40e_pf_config_rss()
12430 hena |= i40e_pf_get_default_rss_hena(pf); in i40e_pf_config_rss()
12437 reg_val = (pf->rss_table_size == 512) ? in i40e_pf_config_rss()
12452 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
12465 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
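i40e_pf_config_rss() above ends by filling the RSS redirection table via i40e_fill_rss_lut(), which stripes the active queue indices across the table (lut[i] = i % rss_size) so hash buckets spread evenly over the queues in use. A standalone sketch of that fill:

/* Illustrative model of striping queues across the RSS lookup table. */
#include <stdint.h>
#include <stdio.h>

static void fill_rss_lut(uint8_t *lut, unsigned int lut_size,
			 unsigned int rss_size)
{
	for (unsigned int i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;
}

int main(void)
{
	uint8_t lut[128];            /* the PF LUT is 512 entries on i40e;  */
	unsigned int rss_size = 12;  /* a smaller table keeps this demo short */

	fill_rss_lut(lut, sizeof(lut), rss_size);
	printf("lut[0]=%u lut[12]=%u lut[127]=%u\n", lut[0], lut[12], lut[127]);
	return 0;
}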
12482 * @pf: board private structure
12489 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) in i40e_reconfig_rss_queues() argument
12491 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_reconfig_rss_queues()
12494 if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags)) in i40e_reconfig_rss_queues()
12498 new_rss_size = min_t(int, queue_count, pf->rss_size_max); in i40e_reconfig_rss_queues()
12504 i40e_prep_for_reset(pf); in i40e_reconfig_rss_queues()
12505 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_reconfig_rss_queues()
12506 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12508 pf->alloc_rss_size = new_rss_size; in i40e_reconfig_rss_queues()
12510 i40e_reset_and_rebuild(pf, true, true); in i40e_reconfig_rss_queues()
12517 dev_dbg(&pf->pdev->dev, in i40e_reconfig_rss_queues()
12523 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
12525 i40e_pf_config_rss(pf); in i40e_reconfig_rss_queues()
12527 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", in i40e_reconfig_rss_queues()
12528 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
12529 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12533 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12534 * @pf: board private structure
12536 int i40e_get_partition_bw_setting(struct i40e_pf *pf) in i40e_get_partition_bw_setting() argument
12542 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, in i40e_get_partition_bw_setting()
12547 pf->min_bw = min_bw; in i40e_get_partition_bw_setting()
12549 pf->max_bw = max_bw; in i40e_get_partition_bw_setting()
12556 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12557 * @pf: board private structure
12559 int i40e_set_partition_bw_setting(struct i40e_pf *pf) in i40e_set_partition_bw_setting() argument
12566 /* Set the valid bit for this PF */ in i40e_set_partition_bw_setting()
12567 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); in i40e_set_partition_bw_setting()
12568 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12569 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12572 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); in i40e_set_partition_bw_setting()
12578 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12579 * @pf: board private structure
12581 int i40e_commit_partition_bw_setting(struct i40e_pf *pf) in i40e_commit_partition_bw_setting() argument
12588 if (pf->hw.partition_id != 1) { in i40e_commit_partition_bw_setting()
12589 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12591 pf->hw.partition_id); in i40e_commit_partition_bw_setting()
12597 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); in i40e_commit_partition_bw_setting()
12598 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12600 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12603 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12608 ret = i40e_aq_read_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12615 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12616 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12618 dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", in i40e_commit_partition_bw_setting()
12620 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12628 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); in i40e_commit_partition_bw_setting()
12629 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12631 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12634 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12641 ret = i40e_aq_update_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12648 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12649 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12651 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12654 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
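The commit sequence above follows the usual NVM ownership protocol. Sketched as an outline rather than exact calls, since the read/update argument lists are not reproduced in this listing:

	/* 1. i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	 *    i40e_aq_read_nvm(&pf->hw, ...);	read the current setting word
	 *    i40e_release_nvm(&pf->hw);
	 * 2. flip the "commit" bit in the word just read
	 * 3. i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	 *    i40e_aq_update_nvm(&pf->hw, ...);	write the word back
	 *    i40e_release_nvm(&pf->hw);
	 * Each AQ failure is logged with i40e_aq_str(&pf->hw, last_aq_status),
	 * where last_aq_status is captured before the NVM is released.
	 */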
12662 * if total port shutdown feature is enabled for this PF
12663 * @pf: board private structure
12665 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) in i40e_is_total_port_shutdown_enabled() argument
12680 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12685 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12692 read_status = i40e_read_nvm_module_data(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12700 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); in i40e_is_total_port_shutdown_enabled()
12706 dev_warn(&pf->pdev->dev, in i40e_is_total_port_shutdown_enabled()
12714 * @pf: board private structure to initialize
12720 static int i40e_sw_init(struct i40e_pf *pf) in i40e_sw_init() argument
12727 bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS); in i40e_sw_init()
12728 set_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_sw_init()
12729 set_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_sw_init()
12732 pf->rx_itr_default = I40E_ITR_RX_DEF; in i40e_sw_init()
12733 pf->tx_itr_default = I40E_ITR_TX_DEF; in i40e_sw_init()
12735 /* Depending on PF configurations, it is possible that the RSS in i40e_sw_init()
12738 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); in i40e_sw_init()
12739 pf->alloc_rss_size = 1; in i40e_sw_init()
12740 pf->rss_table_size = pf->hw.func_caps.rss_table_size; in i40e_sw_init()
12741 pf->rss_size_max = min_t(int, pf->rss_size_max, in i40e_sw_init()
12742 pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12746 pf->rss_size_max = min_t(int, pf->rss_size_max, pow); in i40e_sw_init()
12748 if (pf->hw.func_caps.rss) { in i40e_sw_init()
12749 set_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_sw_init()
12750 pf->alloc_rss_size = min_t(int, pf->rss_size_max, in i40e_sw_init()
12755 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { in i40e_sw_init()
12756 set_bit(I40E_FLAG_MFP_ENA, pf->flags); in i40e_sw_init()
12757 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); in i40e_sw_init()
12758 if (i40e_get_partition_bw_setting(pf)) { in i40e_sw_init()
12759 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12762 dev_info(&pf->pdev->dev, in i40e_sw_init()
12764 pf->min_bw, pf->max_bw); in i40e_sw_init()
12767 i40e_set_partition_bw_setting(pf); in i40e_sw_init()
12771 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || in i40e_sw_init()
12772 (pf->hw.func_caps.fd_filters_best_effort > 0)) { in i40e_sw_init()
12773 set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_sw_init()
12774 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && in i40e_sw_init()
12775 pf->hw.num_partitions > 1) in i40e_sw_init()
12776 dev_info(&pf->pdev->dev, in i40e_sw_init()
12779 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_sw_init()
12780 pf->fdir_pf_filter_count = in i40e_sw_init()
12781 pf->hw.func_caps.fd_filters_guaranteed; in i40e_sw_init()
12782 pf->hw.fdir_shared_filter_count = in i40e_sw_init()
12783 pf->hw.func_caps.fd_filters_best_effort; in i40e_sw_init()
12787 if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) in i40e_sw_init()
12788 set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags); in i40e_sw_init()
12790 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { in i40e_sw_init()
12791 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; in i40e_sw_init()
12792 set_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_sw_init()
12793 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); in i40e_sw_init()
12796 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { in i40e_sw_init()
12797 set_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_sw_init()
12799 pf->num_iwarp_msix = (int)num_online_cpus() + 1; in i40e_sw_init()
12806 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12807 pf->hw.func_caps.npar_enable) in i40e_sw_init()
12808 clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps); in i40e_sw_init()
12811 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { in i40e_sw_init()
12812 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; in i40e_sw_init()
12813 set_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_sw_init()
12814 pf->num_req_vfs = min_t(int, in i40e_sw_init()
12815 pf->hw.func_caps.num_vfs, in i40e_sw_init()
12819 pf->lan_veb = I40E_NO_VEB; in i40e_sw_init()
12820 pf->lan_vsi = I40E_NO_VSI; in i40e_sw_init()
12823 clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); in i40e_sw_init()
12827 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12828 pf->qp_pile = kzalloc(size, GFP_KERNEL); in i40e_sw_init()
12829 if (!pf->qp_pile) { in i40e_sw_init()
12833 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; in i40e_sw_init()
12835 pf->tx_timeout_recovery_level = 1; in i40e_sw_init()
12837 if (pf->hw.mac.type != I40E_MAC_X722 && in i40e_sw_init()
12838 i40e_is_total_port_shutdown_enabled(pf)) { in i40e_sw_init()
12842 set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in i40e_sw_init()
12843 set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in i40e_sw_init()
12844 dev_info(&pf->pdev->dev, in i40e_sw_init()
12847 mutex_init(&pf->switch_mutex); in i40e_sw_init()
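The RSS sizing above is easiest to see with numbers. A hypothetical worked example, assuming pow is the online CPU count rounded up to a power of two and that the truncated min_t() clamps alloc_rss_size against num_online_cpus(), as is conventional:

	/* func_caps: rss_table_entry_width = 9, num_tx_qp = 128, 12 online CPUs
	 *
	 *   rss_size_max   = BIT(9) = 512
	 *   rss_size_max   = min(512, num_tx_qp = 128)             -> 128
	 *   rss_size_max   = min(128, pow = 16)                    -> 16
	 *   alloc_rss_size = min(rss_size_max, num_online_cpus())  -> 12
	 */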
12855 * @pf: board private structure to initialize
12860 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) in i40e_set_ntuple() argument
12869 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) in i40e_set_ntuple()
12874 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { in i40e_set_ntuple()
12875 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_set_ntuple()
12876 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_set_ntuple()
12880 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_set_ntuple()
12882 i40e_fdir_filter_exit(pf); in i40e_set_ntuple()
12884 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_set_ntuple()
12885 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); in i40e_set_ntuple()
12886 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_set_ntuple()
12889 pf->fd_add_err = 0; in i40e_set_ntuple()
12890 pf->fd_atr_cnt = 0; in i40e_set_ntuple()
12892 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_set_ntuple()
12893 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_set_ntuple()
12894 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_set_ntuple()
12895 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); in i40e_set_ntuple()
12906 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut() local
12907 struct i40e_hw *hw = &pf->hw; in i40e_clear_rss_lut()
12918 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_clear_rss_lut()
12923 * i40e_set_loopback - turn on/off loopback mode on underlying PF
12956 struct i40e_pf *pf = vsi->back; in i40e_set_features() local
12960 i40e_pf_config_rss(pf); in i40e_set_features()
12971 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) { in i40e_set_features()
12972 dev_err(&pf->pdev->dev, in i40e_set_features()
12980 need_reset = i40e_set_ntuple(pf, features); in i40e_set_features()
12983 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_set_features()
13039 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id() local
13040 struct i40e_hw *hw = &pf->hw; in i40e_get_phys_port_id()
13042 if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps)) in i40e_get_phys_port_id()
13068 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add() local
13071 if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) in i40e_ndo_fdb_add()
13111 * is to change the mode then that requires a PF reset to
13124 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink() local
13129 /* Only for PF VSI for now */ in i40e_ndo_bridge_setlink()
13130 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_setlink()
13133 /* Find the HW bridge for PF VSI */ in i40e_ndo_bridge_setlink()
13135 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_setlink()
13136 veb = pf->veb[i]; in i40e_ndo_bridge_setlink()
13156 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
13171 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_ndo_bridge_setlink()
13173 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_ndo_bridge_setlink()
13174 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_ndo_bridge_setlink()
13201 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink() local
13205 /* Only for PF VSI for now */ in i40e_ndo_bridge_getlink()
13206 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_getlink()
13209 /* Find the HW bridge for the PF VSI */ in i40e_ndo_bridge_getlink()
13211 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_getlink()
13212 veb = pf->veb[i]; in i40e_ndo_bridge_getlink()
13290 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup() local
13305 i40e_prep_for_reset(pf); in i40e_xdp_setup()
13308 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_xdp_setup()
13319 i40e_reset_and_rebuild(pf, true, true); in i40e_xdp_setup()
13358 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf() local
13361 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_enter_busy_conf()
13377 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf() local
13379 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_exit_busy_conf()
13452 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings() local
13456 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
13459 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13465 i40e_control_rx_q(pf, pf_q, enable); in i40e_queue_pair_toggle_rings()
13466 ret = i40e_pf_rxq_wait(pf, pf_q, enable); in i40e_queue_pair_toggle_rings()
13468 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13483 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
13487 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13503 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq() local
13504 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_enable_irq()
13507 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_queue_pair_enable_irq()
13510 i40e_irq_dynamic_enable_icr0(pf); in i40e_queue_pair_enable_irq()
13523 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq() local
13524 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_disable_irq()
13532 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_queue_pair_disable_irq()
13537 synchronize_irq(pf->msix_entries[intpf].vector); in i40e_queue_pair_disable_irq()
13543 synchronize_irq(pf->pdev->irq); in i40e_queue_pair_disable_irq()
13678 struct i40e_pf *pf = vsi->back; in i40e_config_netdev() local
13679 struct i40e_hw *hw = &pf->hw; in i40e_config_netdev()
13717 if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps)) in i40e_config_netdev()
13720 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; in i40e_config_netdev()
13753 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) in i40e_config_netdev()
13764 SET_NETDEV_DEV(netdev, &pf->pdev->dev); in i40e_config_netdev()
13794 pf->vsi[pf->lan_vsi]->netdev->name); in i40e_config_netdev()
13866 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb() local
13872 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
13874 dev_info(&pf->pdev->dev, in i40e_is_vsi_uplink_mode_veb()
13901 struct i40e_pf *pf = vsi->back; in i40e_add_vsi() local
13902 struct i40e_hw *hw = &pf->hw; in i40e_add_vsi()
13914 /* The PF's main VSI is already setup as part of the in i40e_add_vsi()
13919 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13920 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13922 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_add_vsi()
13925 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13926 "couldn't get PF vsi config, err %pe aq_err %s\n", in i40e_add_vsi()
13928 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13929 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13938 enabled_tc = i40e_pf_get_tc_map(pf); in i40e_add_vsi()
13944 if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) { in i40e_add_vsi()
13946 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13947 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13955 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13958 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13959 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13966 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && in i40e_add_vsi()
13967 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ in i40e_add_vsi()
13969 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13970 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13975 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13978 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13979 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13990 * For MFP case the iSCSI PF would use this in i40e_add_vsi()
13998 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14002 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14003 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14014 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) && in i40e_add_vsi()
14072 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
14097 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14098 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14120 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_add_vsi()
14126 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14129 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14149 struct i40e_pf *pf; in i40e_vsi_release() local
14153 pf = vsi->back; in i40e_vsi_release()
14157 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", in i40e_vsi_release()
14161 if (vsi == pf->vsi[pf->lan_vsi] && in i40e_vsi_release()
14162 !test_bit(__I40E_DOWN, pf->state)) { in i40e_vsi_release()
14163 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); in i40e_vsi_release()
14183 i40e_devlink_destroy_port(pf); in i40e_vsi_release()
14218 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_release()
14219 if (pf->vsi[i] && in i40e_vsi_release()
14220 pf->vsi[i]->uplink_seid == uplink_seid && in i40e_vsi_release()
14221 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
14226 if (!pf->veb[i]) in i40e_vsi_release()
14228 if (pf->veb[i]->uplink_seid == uplink_seid) in i40e_vsi_release()
14230 if (pf->veb[i]->seid == uplink_seid) in i40e_vsi_release()
14231 veb = pf->veb[i]; in i40e_vsi_release()
14252 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors() local
14255 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", in i40e_vsi_setup_vectors()
14261 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", in i40e_vsi_setup_vectors()
14268 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14278 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_setup_vectors()
14281 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
14284 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14308 struct i40e_pf *pf; in i40e_vsi_reinit_setup() local
14315 pf = vsi->back; in i40e_vsi_reinit_setup()
14317 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
14329 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
14331 dev_info(&pf->pdev->dev, in i40e_vsi_reinit_setup()
14341 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_vsi_reinit_setup()
14342 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
14343 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_vsi_reinit_setup()
14344 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_vsi_reinit_setup()
14346 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
14366 i40e_devlink_destroy_port(pf); in i40e_vsi_reinit_setup()
14367 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
14375 * @pf: board private structure
14386 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, in i40e_vsi_setup() argument
14396 * - the PF's port seid in i40e_vsi_setup()
14397 * no VEB is needed because this is the PF in i40e_vsi_setup()
14403 * - seid of the PF VSI, which is what creates the first VEB in i40e_vsi_setup()
14409 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { in i40e_vsi_setup()
14410 veb = pf->veb[i]; in i40e_vsi_setup()
14415 if (!veb && uplink_seid != pf->mac_seid) { in i40e_vsi_setup()
14417 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_setup()
14418 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { in i40e_vsi_setup()
14419 vsi = pf->vsi[i]; in i40e_vsi_setup()
14424 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", in i40e_vsi_setup()
14429 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
14430 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14433 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14436 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { in i40e_vsi_setup()
14445 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { in i40e_vsi_setup()
14447 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_vsi_setup()
14452 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_vsi_setup()
14453 veb = pf->veb[i]; in i40e_vsi_setup()
14456 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); in i40e_vsi_setup()
14465 v_idx = i40e_vsi_mem_alloc(pf, type); in i40e_vsi_setup()
14468 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
14475 pf->lan_vsi = v_idx; in i40e_vsi_setup()
14482 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
14484 dev_info(&pf->pdev->dev, in i40e_vsi_setup()
14508 ret = i40e_devlink_create_port(pf); in i40e_vsi_setup()
14511 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in i40e_vsi_setup()
14543 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && in i40e_vsi_setup()
14564 i40e_devlink_destroy_port(pf); in i40e_vsi_setup()
14566 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
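As a usage example, this is roughly how the driver creates the flow-director sideband VSI in i40e_fdir_sb_setup() (called from i40e_setup_pf_switch() further down); the exact arguments and failure handling there may differ slightly from this sketch:

	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
			     pf->vsi[pf->lan_vsi]->seid, 0);
	if (!vsi) {
		dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
		clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
		set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
	}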
14583 struct i40e_pf *pf = veb->pf; in i40e_veb_get_bw_info() local
14584 struct i40e_hw *hw = &pf->hw; in i40e_veb_get_bw_info()
14592 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14595 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14602 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14605 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14627 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14628 * @pf: board private structure
14631 * On success: returns vsi index in PF (positive)
14633 static int i40e_veb_mem_alloc(struct i40e_pf *pf) in i40e_veb_mem_alloc() argument
14639 /* Need to protect the allocation of switch elements at the PF level */ in i40e_veb_mem_alloc()
14640 mutex_lock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14649 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) in i40e_veb_mem_alloc()
14661 veb->pf = pf; in i40e_veb_mem_alloc()
14665 pf->veb[i] = veb; in i40e_veb_mem_alloc()
14668 mutex_unlock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14681 struct i40e_pf *pf = branch->pf; in i40e_switch_branch_release() local
14688 if (!pf->veb[i]) in i40e_switch_branch_release()
14690 if (pf->veb[i]->uplink_seid == branch->seid) in i40e_switch_branch_release()
14691 i40e_switch_branch_release(pf->veb[i]); in i40e_switch_branch_release()
14699 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_switch_branch_release()
14700 if (!pf->vsi[i]) in i40e_switch_branch_release()
14702 if (pf->vsi[i]->uplink_seid == branch_seid && in i40e_switch_branch_release()
14703 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
14704 i40e_vsi_release(pf->vsi[i]); in i40e_switch_branch_release()
14713 if (pf->veb[veb_idx]) in i40e_switch_branch_release()
14714 i40e_veb_release(pf->veb[veb_idx]); in i40e_switch_branch_release()
14726 if (veb->pf) { in i40e_veb_clear()
14727 struct i40e_pf *pf = veb->pf; in i40e_veb_clear() local
14729 mutex_lock(&pf->switch_mutex); in i40e_veb_clear()
14730 if (pf->veb[veb->idx] == veb) in i40e_veb_clear()
14731 pf->veb[veb->idx] = NULL; in i40e_veb_clear()
14732 mutex_unlock(&pf->switch_mutex); in i40e_veb_clear()
14745 struct i40e_pf *pf; in i40e_veb_release() local
14748 pf = veb->pf; in i40e_veb_release()
14751 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_veb_release()
14752 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { in i40e_veb_release()
14754 vsi = pf->vsi[i]; in i40e_veb_release()
14758 dev_info(&pf->pdev->dev, in i40e_veb_release()
14768 if (veb->uplink_seid == pf->mac_seid) in i40e_veb_release()
14774 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_veb_release()
14775 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; in i40e_veb_release()
14778 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_veb_release()
14789 struct i40e_pf *pf = veb->pf; in i40e_add_veb() local
14790 bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); in i40e_add_veb()
14793 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, in i40e_add_veb()
14799 dev_info(&pf->pdev->dev, in i40e_add_veb()
14802 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14807 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, in i40e_add_veb()
14810 dev_info(&pf->pdev->dev, in i40e_add_veb()
14813 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14818 dev_info(&pf->pdev->dev, in i40e_add_veb()
14821 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14822 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_add_veb()
14835 * @pf: board private structure
14849 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, in i40e_veb_setup() argument
14860 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14867 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) in i40e_veb_setup()
14868 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) in i40e_veb_setup()
14870 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { in i40e_veb_setup()
14871 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", in i40e_veb_setup()
14876 if (uplink_seid && uplink_seid != pf->mac_seid) { in i40e_veb_setup()
14878 if (pf->veb[veb_idx] && in i40e_veb_setup()
14879 pf->veb[veb_idx]->seid == uplink_seid) { in i40e_veb_setup()
14880 uplink_veb = pf->veb[veb_idx]; in i40e_veb_setup()
14885 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14892 veb_idx = i40e_veb_mem_alloc(pf); in i40e_veb_setup()
14895 veb = pf->veb[veb_idx]; in i40e_veb_setup()
14902 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); in i40e_veb_setup()
14905 if (vsi_idx == pf->lan_vsi) in i40e_veb_setup()
14906 pf->lan_veb = veb->idx; in i40e_veb_setup()
14917 * i40e_setup_pf_switch_element - set PF vars based on switch type
14918 * @pf: board private structure
14925 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, in i40e_setup_pf_switch_element() argument
14935 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14941 pf->mac_seid = seid; in i40e_setup_pf_switch_element()
14945 if (uplink_seid != pf->mac_seid) in i40e_setup_pf_switch_element()
14947 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14952 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { in i40e_setup_pf_switch_element()
14953 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14957 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14958 v = i40e_veb_mem_alloc(pf); in i40e_setup_pf_switch_element()
14961 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14964 if (pf->lan_veb >= I40E_MAX_VEB) in i40e_setup_pf_switch_element()
14967 pf->veb[pf->lan_veb]->seid = seid; in i40e_setup_pf_switch_element()
14968 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; in i40e_setup_pf_switch_element()
14969 pf->veb[pf->lan_veb]->pf = pf; in i40e_setup_pf_switch_element()
14970 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; in i40e_setup_pf_switch_element()
14976 * the PF's VSI in i40e_setup_pf_switch_element()
14978 pf->mac_seid = uplink_seid; in i40e_setup_pf_switch_element()
14979 pf->main_vsi_seid = seid; in i40e_setup_pf_switch_element()
14981 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14983 downlink_seid, pf->main_vsi_seid); in i40e_setup_pf_switch_element()
14994 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", in i40e_setup_pf_switch_element()
15002 * @pf: board private structure
15008 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) in i40e_fetch_switch_configuration() argument
15024 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, in i40e_fetch_switch_configuration()
15028 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
15031 i40e_aq_str(&pf->hw, in i40e_fetch_switch_configuration()
15032 pf->hw.aq.asq_last_status)); in i40e_fetch_switch_configuration()
15041 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
15049 i40e_setup_pf_switch_element(pf, ele, num_reported, in i40e_fetch_switch_configuration()
15060 * @pf: board private structure
15066 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) in i40e_setup_pf_switch() argument
15072 ret = i40e_fetch_switch_configuration(pf, false); in i40e_setup_pf_switch()
15074 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15077 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15080 i40e_pf_reset_stats(pf); in i40e_setup_pf_switch()
15088 if ((pf->hw.pf_id == 0) && in i40e_setup_pf_switch()
15089 !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) { in i40e_setup_pf_switch()
15091 pf->last_sw_conf_flags = flags; in i40e_setup_pf_switch()
15094 if (pf->hw.pf_id == 0) { in i40e_setup_pf_switch()
15098 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, in i40e_setup_pf_switch()
15100 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_setup_pf_switch()
15101 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15104 i40e_aq_str(&pf->hw, in i40e_setup_pf_switch()
15105 pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15108 pf->last_sw_conf_valid_flags = valid_flags; in i40e_setup_pf_switch()
15112 if (pf->lan_vsi == I40E_NO_VSI || reinit) { in i40e_setup_pf_switch()
15116 /* Set up the PF VSI associated with the PF's main VSI in i40e_setup_pf_switch()
15119 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_setup_pf_switch()
15120 uplink_seid = pf->veb[pf->lan_veb]->seid; in i40e_setup_pf_switch()
15122 uplink_seid = pf->mac_seid; in i40e_setup_pf_switch()
15123 if (pf->lan_vsi == I40E_NO_VSI) in i40e_setup_pf_switch()
15124 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); in i40e_setup_pf_switch()
15126 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15128 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); in i40e_setup_pf_switch()
15129 i40e_cloud_filter_exit(pf); in i40e_setup_pf_switch()
15130 i40e_fdir_teardown(pf); in i40e_setup_pf_switch()
15135 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_setup_pf_switch()
15137 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
15138 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_setup_pf_switch()
15139 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_setup_pf_switch()
15141 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15143 i40e_fdir_sb_setup(pf); in i40e_setup_pf_switch()
15145 /* Setup static PF queue filter control settings */ in i40e_setup_pf_switch()
15146 ret = i40e_setup_pf_filter_control(pf); in i40e_setup_pf_switch()
15148 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", in i40e_setup_pf_switch()
15156 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) in i40e_setup_pf_switch()
15157 i40e_pf_config_rss(pf); in i40e_setup_pf_switch()
15160 i40e_link_event(pf); in i40e_setup_pf_switch()
15162 i40e_ptp_init(pf); in i40e_setup_pf_switch()
15168 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); in i40e_setup_pf_switch()
15178 * @pf: board private structure
15180 static void i40e_determine_queue_usage(struct i40e_pf *pf) in i40e_determine_queue_usage() argument
15185 pf->num_lan_qps = 0; in i40e_determine_queue_usage()
15191 queues_left = pf->hw.func_caps.num_tx_qp; in i40e_determine_queue_usage()
15194 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_determine_queue_usage()
15195 /* one qp for PF, no queues for anything else */ in i40e_determine_queue_usage()
15197 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15200 clear_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_determine_queue_usage()
15201 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_determine_queue_usage()
15202 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_determine_queue_usage()
15203 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_determine_queue_usage()
15204 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_determine_queue_usage()
15205 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_determine_queue_usage()
15206 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_determine_queue_usage()
15207 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_determine_queue_usage()
15208 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_determine_queue_usage()
15209 } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) && in i40e_determine_queue_usage()
15210 !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && in i40e_determine_queue_usage()
15211 !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_determine_queue_usage()
15212 !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) { in i40e_determine_queue_usage()
15213 /* one qp for PF */ in i40e_determine_queue_usage()
15214 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15215 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15217 clear_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_determine_queue_usage()
15218 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_determine_queue_usage()
15219 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_determine_queue_usage()
15220 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_determine_queue_usage()
15221 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_determine_queue_usage()
15222 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_determine_queue_usage()
15223 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_determine_queue_usage()
15226 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) && in i40e_determine_queue_usage()
15228 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_determine_queue_usage()
15229 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_determine_queue_usage()
15230 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); in i40e_determine_queue_usage()
15234 q_max = max_t(int, pf->rss_size_max, num_online_cpus()); in i40e_determine_queue_usage()
15235 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); in i40e_determine_queue_usage()
15236 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); in i40e_determine_queue_usage()
15237 pf->num_lan_qps = q_max; in i40e_determine_queue_usage()
15239 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15242 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_determine_queue_usage()
15246 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_determine_queue_usage()
15247 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_determine_queue_usage()
15248 …dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n… in i40e_determine_queue_usage()
15252 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && in i40e_determine_queue_usage()
15253 pf->num_vf_qps && pf->num_req_vfs && queues_left) { in i40e_determine_queue_usage()
15254 pf->num_req_vfs = min_t(int, pf->num_req_vfs, in i40e_determine_queue_usage()
15255 (queues_left / pf->num_vf_qps)); in i40e_determine_queue_usage()
15256 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); in i40e_determine_queue_usage()
15259 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && in i40e_determine_queue_usage()
15260 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { in i40e_determine_queue_usage()
15261 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, in i40e_determine_queue_usage()
15262 (queues_left / pf->num_vmdq_qps)); in i40e_determine_queue_usage()
15263 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); in i40e_determine_queue_usage()
15266 pf->queues_left = queues_left; in i40e_determine_queue_usage()
15267 dev_dbg(&pf->pdev->dev, in i40e_determine_queue_usage()
15269 pf->hw.func_caps.num_tx_qp, in i40e_determine_queue_usage()
15270 !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags), in i40e_determine_queue_usage()
15271 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, in i40e_determine_queue_usage()
15272 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, in i40e_determine_queue_usage()
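The accounting above is clearer with numbers. A hypothetical worked example, assuming the flow-director sideband reserves a single queue (as the "not enough queues" fallback above implies):

	/* func_caps: num_tx_qp = 128, rss_size_max = 64, 16 CPUs, ample MSI-X
	 * requested: FD sideband on, 4 VFs with 4 qps each, VMDq off
	 *
	 *   num_lan_qps = min(max(64, 16), 128, num_msix_vectors) = 64
	 *   queues_left = 128 - 64		= 64
	 *   FD sideband reserves 1 queue	-> 63
	 *   VFs consume 4 * 4 = 16 queues	-> 47
	 *   pf->queues_left			= 47
	 */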
15277 * i40e_setup_pf_filter_control - Setup PF static filter control
15278 * @pf: PF to be setup
15280 * i40e_setup_pf_filter_control sets up a PF's initial filter control
15281 * settings. If PE/FCoE are enabled then it will also set the per PF
15283 * ethertype and macvlan type filter settings for the pf.
15287 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) in i40e_setup_pf_filter_control() argument
15289 struct i40e_filter_control_settings *settings = &pf->filter_settings; in i40e_setup_pf_filter_control()
15294 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) || in i40e_setup_pf_filter_control()
15295 test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) in i40e_setup_pf_filter_control()
15298 /* Ethtype and MACVLAN filters enabled for PF */ in i40e_setup_pf_filter_control()
15302 if (i40e_set_filter_control(&pf->hw, settings)) in i40e_setup_pf_filter_control()
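A hedged sketch of what the function fills in before the i40e_set_filter_control() call above; the field names come from struct i40e_filter_control_settings, and the LUT-size constant is an assumption:

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled if either sideband or ATR is active */
	if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ||
	    test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;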
15310 static void i40e_print_features(struct i40e_pf *pf) in i40e_print_features() argument
15312 struct i40e_hw *hw = &pf->hw; in i40e_print_features()
15320 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); in i40e_print_features()
15322 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); in i40e_print_features()
15325 pf->hw.func_caps.num_vsis, in i40e_print_features()
15326 pf->vsi[pf->lan_vsi]->num_queue_pairs); in i40e_print_features()
15327 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) in i40e_print_features()
15329 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) in i40e_print_features()
15331 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_print_features()
15335 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) in i40e_print_features()
15339 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_print_features()
15341 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) in i40e_print_features()
15346 dev_info(&pf->pdev->dev, "%s\n", buf); in i40e_print_features()
15354 * @pf: board private structure
15361 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) in i40e_get_platform_mac_addr() argument
15363 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) in i40e_get_platform_mac_addr()
15364 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); in i40e_get_platform_mac_addr()
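Worth noting for the fallback above: eth_platform_get_mac_address() returns 0 on success, so i40e_get_mac_addr() (which queries the adapter for its factory MAC) only runs when no platform or device-tree supplied address exists.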
15396 * @pf: board private structure
15403 static bool i40e_check_recovery_mode(struct i40e_pf *pf) in i40e_check_recovery_mode() argument
15405 u32 val = rd32(&pf->hw, I40E_GL_FWSTS); in i40e_check_recovery_mode()
15408 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); in i40e_check_recovery_mode()
15409 …dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for detai… in i40e_check_recovery_mode()
15410 set_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_check_recovery_mode()
15414 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_check_recovery_mode()
15415 …dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full … in i40e_check_recovery_mode()
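A hedged sketch of the test applied to the I40E_GL_FWSTS value read above; the exact field mask (FWS1B in recent kernels) and treating any non-zero value as recovery are assumptions that have varied across kernel versions:

	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
		/* firmware is running its recovery image: limit functionality */
		set_bit(__I40E_RECOVERY_MODE, pf->state);
		return true;
	}

	/* not in recovery now; if we were before this reset, print the
	 * "Please do Power-On Reset" hint above but report normal mode
	 */
	return false;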
15422 * @pf: board private structure
15431 * state is to issue a series of pf-resets and check a return value.
15432 * If a PF reset returns success then the firmware could be in recovery
15441 static int i40e_pf_loop_reset(struct i40e_pf *pf) in i40e_pf_loop_reset() argument
15443 /* wait max 10 seconds for PF reset to succeed */ in i40e_pf_loop_reset()
15445 struct i40e_hw *hw = &pf->hw; in i40e_pf_loop_reset()
15455 pf->pfr_count++; in i40e_pf_loop_reset()
15457 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); in i40e_pf_loop_reset()
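The loop described by the "wait max 10 seconds for PF reset to succeed" comment above plausibly looks like the following; the sleep interval and the exact retry shape around i40e_pf_reset() are assumptions:

	const unsigned long time_end = jiffies + 10 * HZ;
	int ret;

	ret = i40e_pf_reset(hw);
	while (ret != 0 && time_before(jiffies, time_end)) {
		usleep_range(10000, 20000);
		ret = i40e_pf_reset(hw);
	}

	if (ret == 0)
		pf->pfr_count++;
	else
		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);

	return ret;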
15464 * @pf: board private structure
15473 static bool i40e_check_fw_empr(struct i40e_pf *pf) in i40e_check_fw_empr() argument
15475 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & in i40e_check_fw_empr()
15482 * i40e_handle_resets - handle EMP resets and PF resets
15483 * @pf: board private structure
15485 * Handle both EMP resets and PF resets and conclude whether there are
15492 static int i40e_handle_resets(struct i40e_pf *pf) in i40e_handle_resets() argument
15494 const int pfr = i40e_pf_loop_reset(pf); in i40e_handle_resets()
15495 const bool is_empr = i40e_check_fw_empr(pf); in i40e_handle_resets()
15498 …dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several … in i40e_handle_resets()
15505 * @pf: board private structure
15513 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) in i40e_init_recovery_mode() argument
15519 pci_set_drvdata(pf->pdev, pf); in i40e_init_recovery_mode()
15520 pci_save_state(pf->pdev); in i40e_init_recovery_mode()
15523 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_init_recovery_mode()
15524 pf->service_timer_period = HZ; in i40e_init_recovery_mode()
15526 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_init_recovery_mode()
15527 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_init_recovery_mode()
15529 err = i40e_init_interrupt_scheme(pf); in i40e_init_recovery_mode()
15538 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_init_recovery_mode()
15539 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_init_recovery_mode()
15541 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_init_recovery_mode()
15543 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */ in i40e_init_recovery_mode()
15544 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
15546 if (!pf->vsi) { in i40e_init_recovery_mode()
15554 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); in i40e_init_recovery_mode()
15559 pf->lan_vsi = v_idx; in i40e_init_recovery_mode()
15560 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
15573 i40e_dbg_pf_init(pf); in i40e_init_recovery_mode()
15575 err = i40e_setup_misc_vector_for_recovery_mode(pf); in i40e_init_recovery_mode()
15580 i40e_send_version(pf); in i40e_init_recovery_mode()
15583 mod_timer(&pf->service_timer, in i40e_init_recovery_mode()
15584 round_jiffies(jiffies + pf->service_timer_period)); in i40e_init_recovery_mode()
15589 i40e_reset_interrupt_capability(pf); in i40e_init_recovery_mode()
15590 timer_shutdown_sync(&pf->service_timer); in i40e_init_recovery_mode()
15593 pci_release_mem_regions(pf->pdev); in i40e_init_recovery_mode()
15594 pci_disable_device(pf->pdev); in i40e_init_recovery_mode()
15595 i40e_free_pf(pf); in i40e_init_recovery_mode()
15609 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_set_subsystem_device_id() local
15611 hw->subsystem_device_id = pf->pdev->subsystem_device ? in i40e_set_subsystem_device_id()
15612 pf->pdev->subsystem_device : in i40e_set_subsystem_device_id()
15621 * i40e_probe initializes a PF identified by a pci_dev structure.
15622 * The OS initialization, configuring of the PF private structure,
15633 struct i40e_pf *pf; in i40e_probe() local
15672 pf = i40e_alloc_pf(&pdev->dev); in i40e_probe()
15673 if (!pf) { in i40e_probe()
15677 pf->next_vsi = 0; in i40e_probe()
15678 pf->pdev = pdev; in i40e_probe()
15679 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
15681 hw = &pf->hw; in i40e_probe()
15683 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), in i40e_probe()
15690 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { in i40e_probe()
15692 pf->ioremap_len); in i40e_probe()
15696 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); in i40e_probe()
15701 pf->ioremap_len, err); in i40e_probe()
15720 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_probe()
15721 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_probe()
15722 INIT_LIST_HEAD(&pf->ddp_old_prof); in i40e_probe()
15730 pf->msg_enable = netif_msg_init(debug, in i40e_probe()
15735 pf->hw.debug_mask = debug; in i40e_probe()
15743 pf->corer_count++; in i40e_probe()
15748 /* Reset here to make sure all is clean and to define PF 'n' */ in i40e_probe()
15758 err = i40e_handle_resets(pf); in i40e_probe()
15762 i40e_check_recovery_mode(pf); in i40e_probe()
15774 snprintf(pf->int_name, sizeof(pf->int_name) - 1, in i40e_probe()
15776 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); in i40e_probe()
15786 pf->hw.fc.requested_mode = I40E_FC_NONE; in i40e_probe()
15830 i40e_verify_eeprom(pf); in i40e_probe()
15838 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); in i40e_probe()
15842 err = i40e_sw_init(pf); in i40e_probe()
15848 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_probe()
15849 return i40e_init_recovery_mode(pf, hw); in i40e_probe()
15869 if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) { in i40e_probe()
15875 i40e_get_platform_mac_addr(pdev, pf); in i40e_probe()
15886 set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps); in i40e_probe()
15888 i40e_ptp_alloc_pins(pf); in i40e_probe()
15889 pci_set_drvdata(pdev, pf); in i40e_probe()
15893 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); in i40e_probe()
15896 (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) : in i40e_probe()
15897 (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)); in i40e_probe()
15899 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ? in i40e_probe()
15906 err = i40e_init_pf_dcb(pf); in i40e_probe()
15909 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_probe()
15910 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_probe()
15916 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_probe()
15917 pf->service_timer_period = HZ; in i40e_probe()
15919 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_probe()
15920 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_probe()
15925 pf->wol_en = false; in i40e_probe()
15927 pf->wol_en = true; in i40e_probe()
15928 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_probe()
15931 i40e_determine_queue_usage(pf); in i40e_probe()
15932 err = i40e_init_interrupt_scheme(pf); in i40e_probe()
15938 * pairs than MSI-X vectors (pf->num_lan_msix) exist. Thus in i40e_probe()
15939 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1. in i40e_probe()
15942 pf->num_lan_msix = 1; in i40e_probe()
15944 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; in i40e_probe()
15945 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; in i40e_probe()
15946 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in i40e_probe()
15947 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; in i40e_probe()
15948 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; in i40e_probe()
15949 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | in i40e_probe()
15957 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_probe()
15958 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_probe()
15960 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_probe()
15961 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in i40e_probe()
15962 dev_warn(&pf->pdev->dev, in i40e_probe()
15964 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in i40e_probe()
15965 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in i40e_probe()
15968 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ in i40e_probe()
15969 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
15971 if (!pf->vsi) { in i40e_probe()
15978 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && in i40e_probe()
15979 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && in i40e_probe()
15980 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
15982 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_probe()
15985 err = i40e_setup_pf_switch(pf, false, false); in i40e_probe()
15990 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); in i40e_probe()
15993 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_probe()
15994 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { in i40e_probe()
15995 i40e_vsi_open(pf->vsi[i]); in i40e_probe()
16003 err = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_probe()
16008 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", in i40e_probe()
16010 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16023 if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { in i40e_probe()
16025 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_probe()
16027 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", in i40e_probe()
16029 i40e_aq_str(&pf->hw, in i40e_probe()
16030 pf->hw.aq.asq_last_status)); in i40e_probe()
16036 clear_bit(__I40E_DOWN, pf->state); in i40e_probe()
16043 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_probe()
16044 err = i40e_setup_misc_vector(pf); in i40e_probe()
16048 i40e_cloud_filter_exit(pf); in i40e_probe()
16049 i40e_fdir_teardown(pf); in i40e_probe()
16056 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && in i40e_probe()
16057 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && in i40e_probe()
16058 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
16068 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); in i40e_probe()
16077 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_probe()
16078 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_probe()
16079 pf->num_iwarp_msix, in i40e_probe()
16081 if (pf->iwarp_base_vector < 0) { in i40e_probe()
16084 pf->num_iwarp_msix, pf->iwarp_base_vector); in i40e_probe()
16085 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_probe()
16089 i40e_dbg_pf_init(pf); in i40e_probe()
16092 i40e_send_version(pf); in i40e_probe()
16095 mod_timer(&pf->service_timer, in i40e_probe()
16096 round_jiffies(jiffies + pf->service_timer_period)); in i40e_probe()
16098 /* add this PF to client device list and launch a client service task */ in i40e_probe()
16099 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_probe()
16100 err = i40e_lan_add_device(pf); in i40e_probe()
16102 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", in i40e_probe()
16112 if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) { in i40e_probe()
16119 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, in i40e_probe()
16160 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n", in i40e_probe()
16162 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16163 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; in i40e_probe()
16166 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags); in i40e_probe()
16171 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n", in i40e_probe()
16173 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16178 rd32(&pf->hw, I40E_PRTGL_SAH)); in i40e_probe()
16181 pf->hw.port, val); in i40e_probe()
16186 * PF/VF VSIs. in i40e_probe()
16189 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_probe()
16190 pf->main_vsi_seid); in i40e_probe()
16192 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || in i40e_probe()
16193 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) in i40e_probe()
16194 set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps); in i40e_probe()
16195 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) in i40e_probe()
16196 set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps); in i40e_probe()
16198 i40e_print_features(pf); in i40e_probe()
16200 i40e_devlink_register(pf); in i40e_probe()
16206 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
16207 i40e_clear_interrupt_scheme(pf); in i40e_probe()
16208 kfree(pf->vsi); in i40e_probe()
16210 i40e_reset_interrupt_capability(pf); in i40e_probe()
16211 timer_shutdown_sync(&pf->service_timer); in i40e_probe()
16216 kfree(pf->qp_pile); in i40e_probe()
16222 i40e_free_pf(pf); in i40e_probe()
16242 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_remove() local
16243 struct i40e_hw *hw = &pf->hw; in i40e_remove()
16247 i40e_devlink_unregister(pf); in i40e_remove()
16249 i40e_dbg_pf_exit(pf); in i40e_remove()
16251 i40e_ptp_stop(pf); in i40e_remove()
16261 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_remove()
16263 set_bit(__I40E_IN_REMOVE, pf->state); in i40e_remove()
16265 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) { in i40e_remove()
16266 set_bit(__I40E_VF_RESETS_DISABLED, pf->state); in i40e_remove()
16267 i40e_free_vfs(pf); in i40e_remove()
16268 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_remove()
16271 set_bit(__I40E_SUSPENDED, pf->state); in i40e_remove()
16272 set_bit(__I40E_DOWN, pf->state); in i40e_remove()
16273 if (pf->service_timer.function) in i40e_remove()
16274 timer_shutdown_sync(&pf->service_timer); in i40e_remove()
16275 if (pf->service_task.func) in i40e_remove()
16276 cancel_work_sync(&pf->service_task); in i40e_remove()
16278 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_remove()
16279 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove()
16281 /* We know that we have allocated only one vsi for this PF, in i40e_remove()
16294 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_remove()
16296 i40e_fdir_teardown(pf); in i40e_remove()
16299 * This will leave only the PF's VSI remaining. in i40e_remove()
16302 if (!pf->veb[i]) in i40e_remove()
16305 if (pf->veb[i]->uplink_seid == pf->mac_seid || in i40e_remove()
16306 pf->veb[i]->uplink_seid == 0) in i40e_remove()
16307 i40e_switch_branch_release(pf->veb[i]); in i40e_remove()
16310 /* Now we can shutdown the PF's VSIs, just before we kill in i40e_remove()
16313 for (i = pf->num_alloc_vsi; i--;) in i40e_remove()
16314 if (pf->vsi[i]) { in i40e_remove()
16315 i40e_vsi_close(pf->vsi[i]); in i40e_remove()
16316 i40e_vsi_release(pf->vsi[i]); in i40e_remove()
16317 pf->vsi[i] = NULL; in i40e_remove()
16320 i40e_cloud_filter_exit(pf); in i40e_remove()
16323 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_remove()
16324 ret_code = i40e_lan_del_device(pf); in i40e_remove()
16341 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_remove()
16342 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_remove()
16343 free_irq(pf->pdev->irq, pf); in i40e_remove()
16354 i40e_clear_interrupt_scheme(pf); in i40e_remove()
16355 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_remove()
16356 if (pf->vsi[i]) { in i40e_remove()
16357 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_remove()
16358 i40e_vsi_clear_rings(pf->vsi[i]); in i40e_remove()
16359 i40e_vsi_clear(pf->vsi[i]); in i40e_remove()
16360 pf->vsi[i] = NULL; in i40e_remove()
16366 kfree(pf->veb[i]); in i40e_remove()
16367 pf->veb[i] = NULL; in i40e_remove()
16370 kfree(pf->qp_pile); in i40e_remove()
16371 kfree(pf->vsi); in i40e_remove()
16374 i40e_free_pf(pf); in i40e_remove()
16392 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_detected() local
16396 if (!pf) { in i40e_pci_error_detected()
16403 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_detected()
16404 i40e_prep_for_reset(pf); in i40e_pci_error_detected()
16421 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_slot_reset() local
16436 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_pci_error_slot_reset()
16452 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_reset_prepare() local
16454 i40e_prep_for_reset(pf); in i40e_pci_error_reset_prepare()
16463 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_reset_done() local
16465 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_pci_error_reset_done()
16468 i40e_reset_and_rebuild(pf, false, false); in i40e_pci_error_reset_done()
16483 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_resume() local
16486 if (test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_resume()
16489 i40e_handle_reset_warning(pf, false); in i40e_pci_error_resume()
16495 * @pf: pointer to i40e_pf struct
16497 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) in i40e_enable_mc_magic_wake() argument
16499 struct i40e_hw *hw = &pf->hw; in i40e_enable_mc_magic_wake()
16505 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { in i40e_enable_mc_magic_wake()
16507 pf->vsi[pf->lan_vsi]->netdev->dev_addr); in i40e_enable_mc_magic_wake()
16509 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16525 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16535 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16545 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_shutdown() local
16546 struct i40e_hw *hw = &pf->hw; in i40e_shutdown()
16548 set_bit(__I40E_SUSPENDED, pf->state); in i40e_shutdown()
16549 set_bit(__I40E_DOWN, pf->state); in i40e_shutdown()
16551 del_timer_sync(&pf->service_timer); in i40e_shutdown()
16552 cancel_work_sync(&pf->service_task); in i40e_shutdown()
16553 i40e_cloud_filter_exit(pf); in i40e_shutdown()
16554 i40e_fdir_teardown(pf); in i40e_shutdown()
16559 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_shutdown()
16561 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && in i40e_shutdown()
16562 pf->wol_en) in i40e_shutdown()
16563 i40e_enable_mc_magic_wake(pf); in i40e_shutdown()
16565 i40e_prep_for_reset(pf); in i40e_shutdown()
16568 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_shutdown()
16570 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_shutdown()
16573 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_shutdown()
16574 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_shutdown()
16575 free_irq(pf->pdev->irq, pf); in i40e_shutdown()
16582 i40e_clear_interrupt_scheme(pf); in i40e_shutdown()
16586 pci_wake_from_d3(pdev, pf->wol_en); in i40e_shutdown()
16597 struct i40e_pf *pf = dev_get_drvdata(dev); in i40e_suspend() local
16598 struct i40e_hw *hw = &pf->hw; in i40e_suspend()
16601 if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) in i40e_suspend()
16604 set_bit(__I40E_DOWN, pf->state); in i40e_suspend()
16607 del_timer_sync(&pf->service_timer); in i40e_suspend()
16608 cancel_work_sync(&pf->service_task); in i40e_suspend()
16613 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_suspend()
16615 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && in i40e_suspend()
16616 pf->wol_en) in i40e_suspend()
16617 i40e_enable_mc_magic_wake(pf); in i40e_suspend()
16625 i40e_prep_for_reset(pf); in i40e_suspend()
16627 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_suspend()
16628 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_suspend()
16635 i40e_clear_interrupt_scheme(pf); in i40e_suspend()
16648 struct i40e_pf *pf = dev_get_drvdata(dev); in i40e_resume() local
16652 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_resume()
16663 err = i40e_restore_interrupt_scheme(pf); in i40e_resume()
16669 clear_bit(__I40E_DOWN, pf->state); in i40e_resume()
16670 i40e_reset_and_rebuild(pf, false, true); in i40e_resume()
16675 clear_bit(__I40E_SUSPENDED, pf->state); in i40e_resume()
16678 mod_timer(&pf->service_timer, in i40e_resume()
16679 round_jiffies(jiffies + pf->service_timer_period)); in i40e_resume()