Lines matching full:pf

Each entry below gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark lines where pf is declared as a function parameter or a local variable.

19  * @pf: pointer to the PF structure
24 static void ice_free_vf_entries(struct ice_pf *pf) in ice_free_vf_entries() argument
26 struct ice_vfs *vfs = &pf->vfs; in ice_free_vf_entries()
49 struct ice_pf *pf = vf->pf; in ice_free_vf_res() local
75 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); in ice_free_vf_res()
76 ice_flush(&pf->hw); in ice_free_vf_res()
89 struct ice_pf *pf = vf->pf; in ice_dis_vf_mappings() local
95 hw = &pf->hw; in ice_dis_vf_mappings()
100 dev = ice_pf_to_dev(pf); in ice_dis_vf_mappings()
127 * @pf: pointer to the PF structure
129 * Since no MSIX entries are taken from the pf->irq_tracker then just clear
130 * the pf->sriov_base_vector.
134 static int ice_sriov_free_msix_res(struct ice_pf *pf) in ice_sriov_free_msix_res() argument
136 if (!pf) in ice_sriov_free_msix_res()
139 bitmap_free(pf->sriov_irq_bm); in ice_sriov_free_msix_res()
140 pf->sriov_irq_size = 0; in ice_sriov_free_msix_res()
141 pf->sriov_base_vector = 0; in ice_sriov_free_msix_res()
148 * @pf: pointer to the PF structure
150 void ice_free_vfs(struct ice_pf *pf) in ice_free_vfs() argument
152 struct device *dev = ice_pf_to_dev(pf); in ice_free_vfs()
153 struct ice_vfs *vfs = &pf->vfs; in ice_free_vfs()
154 struct ice_hw *hw = &pf->hw; in ice_free_vfs()
158 if (!ice_has_vfs(pf)) in ice_free_vfs()
161 while (test_and_set_bit(ICE_VF_DIS, pf->state)) in ice_free_vfs()
168 if (!pci_vfs_assigned(pf->pdev)) in ice_free_vfs()
169 pci_disable_sriov(pf->pdev); in ice_free_vfs()
173 ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); in ice_free_vfs()
177 ice_for_each_vf(pf, bkt, vf) { in ice_free_vfs()
180 ice_eswitch_detach(pf, vf); in ice_free_vfs()
190 if (!pci_vfs_assigned(pf->pdev)) { in ice_free_vfs()
204 if (ice_sriov_free_msix_res(pf)) in ice_free_vfs()
208 ice_free_vf_entries(pf); in ice_free_vfs()
212 clear_bit(ICE_VF_DIS, pf->state); in ice_free_vfs()
213 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_free_vfs()
226 struct ice_pf *pf = vf->pf; in ice_vf_vsi_setup() local
234 vsi = ice_vsi_setup(pf, &params); in ice_vf_vsi_setup()
237 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); in ice_vf_vsi_setup()
254 * device values and other registers need 0-based values, which represent PF
261 struct ice_pf *pf = vf->pf; in ice_ena_vf_msix_mappings() local
266 hw = &pf->hw; in ice_ena_vf_msix_mappings()
271 pf->hw.func_caps.common_cap.msix_vector_first_id; in ice_ena_vf_msix_mappings()
305 struct device *dev = ice_pf_to_dev(vf->pf); in ice_ena_vf_q_mappings()
307 struct ice_hw *hw = &vf->pf->hw; in ice_ena_vf_q_mappings()
318 /* set the VF PF Tx queue range in ice_ena_vf_q_mappings()
334 /* set the VF PF Rx queue range in ice_ena_vf_q_mappings()
362 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
377 * @pf: pointer to PF structure
380 * This function allows SR-IOV resources to be taken from the end of the PF's
382 * just set the pf->sriov_base_vector and return success.
388 * in the PF's space available for SR-IOV.
390 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) in ice_sriov_set_msix_res() argument
392 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_sriov_set_msix_res()
393 int vectors_used = ice_get_max_used_msix_vector(pf); in ice_sriov_set_msix_res()
404 pf->sriov_base_vector = sriov_base_vector; in ice_sriov_set_msix_res()
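The ice_sriov_set_msix_res() matches above outline a carve-out from the top of the PF's MSI-X space: the SR-IOV base vector is whatever remains after taking num_msix_needed vectors off the end, and it must not dip below the vectors the PF already uses. A minimal userspace sketch of that arithmetic, with its own names and simplified error handling rather than the driver's:

#include <stdio.h>

/* Carve num_msix_needed vectors off the top of a total_vectors-sized
 * MSI-X space; fail if the resulting base would overlap the vectors the
 * PF already consumes. Simplified model, not the driver function. */
static int set_msix_res(int total_vectors, int vectors_used,
			int num_msix_needed, int *sriov_base_vector)
{
	int base = total_vectors - num_msix_needed;

	if (base < vectors_used)
		return -1;	/* not enough room above the PF's own vectors */

	*sriov_base_vector = base;
	return 0;
}

int main(void)
{
	int base;

	/* 1024 vectors total, 96 in use by the PF, 5 VFs x 64 vectors each */
	if (!set_msix_res(1024, 96, 5 * 64, &base))
		printf("SR-IOV base vector: %d\n", base);	/* 704 */
	return 0;
}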
411 * @pf: pointer to the PF structure
428 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
431 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) in ice_set_per_vf_res() argument
433 int vectors_used = ice_get_max_used_msix_vector(pf); in ice_set_per_vf_res()
436 struct device *dev = ice_pf_to_dev(pf); in ice_set_per_vf_res()
439 lockdep_assert_held(&pf->vfs.table_lock); in ice_set_per_vf_res()
445 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - in ice_set_per_vf_res()
465 avail_qs = ice_get_avail_txq_count(pf) / num_vfs; in ice_set_per_vf_res()
473 avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; in ice_set_per_vf_res()
485 err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); in ice_set_per_vf_res()
493 pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); in ice_set_per_vf_res()
494 pf->vfs.num_msix_per = num_msix_per_vf; in ice_set_per_vf_res()
496 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); in ice_set_per_vf_res()
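The ice_set_per_vf_res() matches sketch how the MSI-X vectors left after the PF's own use and the available Tx/Rx queues are divided across num_vfs, with the per-VF queue-pair count taken as the smaller of the Tx and Rx shares. The toy model below keeps only that division; the tiered choice of num_msix_per_vf in the real function is not reproduced, and every name is local to the sketch:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* Split leftover MSI-X vectors and queues evenly across num_vfs and pick
 * the queue-pair count as min(Tx share, Rx share). Simplified model. */
static void set_per_vf_res(int total_msix, int msix_used,
			   int avail_txq, int avail_rxq, int num_vfs,
			   int *msix_per_vf, int *qps_per_vf)
{
	int msix_avail_for_sriov = total_msix - msix_used;
	int num_txq = avail_txq / num_vfs;
	int num_rxq = avail_rxq / num_vfs;

	*msix_per_vf = msix_avail_for_sriov / num_vfs;
	*qps_per_vf = min_int(num_txq, num_rxq);
}

int main(void)
{
	int msix, qps;

	set_per_vf_res(1024, 96, 268, 256, 8, &msix, &qps);
	printf("per VF: %d MSI-X vectors, %d queue pairs\n", msix, qps);
	return 0;
}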
503 * @pf: pointer to PF structure
506 * This returns the first MSI-X vector index in PF space that is used by this
507 * VF. This index is used when accessing PF relative registers such as
519 static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed) in ice_sriov_get_irqs() argument
521 int res = bitmap_find_next_zero_area(pf->sriov_irq_bm, in ice_sriov_get_irqs()
522 pf->sriov_irq_size, 0, needed, 0); in ice_sriov_get_irqs()
524 int index = pf->sriov_irq_size - res - needed; in ice_sriov_get_irqs()
526 if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector) in ice_sriov_get_irqs()
529 bitmap_set(pf->sriov_irq_bm, res, needed); in ice_sriov_get_irqs()
535 * @pf: pointer to PF structure
538 static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf) in ice_sriov_free_irqs() argument
541 int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix; in ice_sriov_free_irqs()
543 bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix); in ice_sriov_free_irqs()
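The ice_sriov_get_irqs()/ice_sriov_free_irqs() matches show a mirrored indexing scheme: a free range is searched from the front of sriov_irq_bm, but the vector index handed back counts from the end of the space (sriov_irq_size - res - needed), and the free path inverts the same formula to find the bitmap offset again. A self-contained model of that bookkeeping, using a plain array in place of the kernel bitmap helpers:

#include <stdbool.h>
#include <stdio.h>

#define IRQ_SPACE 32			/* stand-in for pf->sriov_irq_size */

static bool bm[IRQ_SPACE];		/* true = vector taken */

/* Linear scan for a run of 'needed' free bits; returns IRQ_SPACE if none. */
static int find_zero_area(int needed)
{
	for (int start = 0; start + needed <= IRQ_SPACE; start++) {
		int i;

		for (i = 0; i < needed && !bm[start + i]; i++)
			;
		if (i == needed)
			return start;
	}
	return IRQ_SPACE;
}

/* Allocate from the front of the bitmap, report an index from the end. */
static int get_irqs(int needed, int base_vector)
{
	int res = find_zero_area(needed);
	int index = IRQ_SPACE - res - needed;

	if (res >= IRQ_SPACE || index < base_vector)
		return -1;

	for (int i = 0; i < needed; i++)
		bm[res + i] = true;
	return index;			/* PF-relative first vector */
}

/* Recover the bitmap offset with the mirrored formula and clear it. */
static void free_irqs(int first_vector_idx, int num_msix)
{
	int bm_i = IRQ_SPACE - first_vector_idx - num_msix;

	for (int i = 0; i < num_msix; i++)
		bm[bm_i + i] = false;
}

int main(void)
{
	int a = get_irqs(4, 8);		/* 28: topmost four vectors */
	int b = get_irqs(4, 8);		/* 24: the four just below */

	printf("a=%d b=%d\n", a, b);
	free_irqs(b, 4);
	free_irqs(a, 4);
	return 0;
}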
556 struct ice_pf *pf = vf->pf; in ice_init_vf_vsi_res() local
560 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); in ice_init_vf_vsi_res()
581 * @pf: PF the VFs are associated with
583 static int ice_start_vfs(struct ice_pf *pf) in ice_start_vfs() argument
585 struct ice_hw *hw = &pf->hw; in ice_start_vfs()
590 lockdep_assert_held(&pf->vfs.table_lock); in ice_start_vfs()
593 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
598 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", in ice_start_vfs()
603 retval = ice_eswitch_attach(pf, vf); in ice_start_vfs()
605 dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", in ice_start_vfs()
621 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
653 struct ice_hw *hw = &vf->pf->hw; in ice_sriov_clear_reset_state()
668 struct ice_pf *pf = vf->pf; in ice_sriov_clear_mbx_register() local
670 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
671 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
683 struct ice_pf *pf = vf->pf; in ice_sriov_trigger_reset_register() local
689 dev = ice_pf_to_dev(pf); in ice_sriov_trigger_reset_register()
690 hw = &pf->hw; in ice_sriov_trigger_reset_register()
730 struct ice_pf *pf = vf->pf; in ice_sriov_poll_reset_status() local
739 reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); in ice_sriov_poll_reset_status()
755 struct ice_hw *hw = &vf->pf->hw; in ice_sriov_clear_reset_trigger()
771 wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); in ice_sriov_post_vsi_rebuild()
788 * @pf: pointer to the PF structure
799 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) in ice_create_vf_entries() argument
801 struct pci_dev *pdev = pf->pdev; in ice_create_vf_entries()
802 struct ice_vfs *vfs = &pf->vfs; in ice_create_vf_entries()
821 vf->pf = pf; in ice_create_vf_entries()
833 vf->vf_sw_id = pf->first_sw; in ice_create_vf_entries()
838 vf->num_msix = pf->vfs.num_msix_per; in ice_create_vf_entries()
839 vf->num_vf_qs = pf->vfs.num_qps_per; in ice_create_vf_entries()
854 ice_free_vf_entries(pf); in ice_create_vf_entries()
860 * @pf: pointer to the PF structure
863 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) in ice_ena_vfs() argument
865 int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_ena_vfs()
866 struct device *dev = ice_pf_to_dev(pf); in ice_ena_vfs()
867 struct ice_hw *hw = &pf->hw; in ice_ena_vfs()
870 pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL); in ice_ena_vfs()
871 if (!pf->sriov_irq_bm) in ice_ena_vfs()
873 pf->sriov_irq_size = total_vectors; in ice_ena_vfs()
876 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), in ice_ena_vfs()
878 set_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
881 ret = pci_enable_sriov(pf->pdev, num_vfs); in ice_ena_vfs()
885 mutex_lock(&pf->vfs.table_lock); in ice_ena_vfs()
887 ret = ice_set_per_vf_res(pf, num_vfs); in ice_ena_vfs()
894 ret = ice_create_vf_entries(pf, num_vfs); in ice_ena_vfs()
901 ice_eswitch_reserve_cp_queues(pf, num_vfs); in ice_ena_vfs()
902 ret = ice_start_vfs(pf); in ice_ena_vfs()
909 clear_bit(ICE_VF_DIS, pf->state); in ice_ena_vfs()
912 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) in ice_ena_vfs()
915 mutex_unlock(&pf->vfs.table_lock); in ice_ena_vfs()
920 ice_free_vf_entries(pf); in ice_ena_vfs()
922 mutex_unlock(&pf->vfs.table_lock); in ice_ena_vfs()
923 pci_disable_sriov(pf->pdev); in ice_ena_vfs()
927 clear_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
928 bitmap_free(pf->sriov_irq_bm); in ice_ena_vfs()
934 * @pf: pointer to the PF structure
939 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) in ice_pci_sriov_ena() argument
941 struct device *dev = ice_pf_to_dev(pf); in ice_pci_sriov_ena()
945 ice_free_vfs(pf); in ice_pci_sriov_ena()
949 if (num_vfs > pf->vfs.num_supported) { in ice_pci_sriov_ena()
951 num_vfs, pf->vfs.num_supported); in ice_pci_sriov_ena()
956 err = ice_ena_vfs(pf, num_vfs); in ice_pci_sriov_ena()
962 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_pci_sriov_ena()
968  * @pf: PF to enable SR-IOV on
970 static int ice_check_sriov_allowed(struct ice_pf *pf) in ice_check_sriov_allowed() argument
972 struct device *dev = ice_pf_to_dev(pf); in ice_check_sriov_allowed()
974 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { in ice_check_sriov_allowed()
979 if (ice_is_safe_mode(pf)) { in ice_check_sriov_allowed()
984 if (!ice_pf_state_is_nominal(pf)) { in ice_check_sriov_allowed()
1000 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_get_vf_total_msix() local
1002 return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf); in ice_sriov_get_vf_total_msix()
1005 static int ice_sriov_move_base_vector(struct ice_pf *pf, int move) in ice_sriov_move_base_vector() argument
1007 if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf)) in ice_sriov_move_base_vector()
1010 pf->sriov_base_vector -= move; in ice_sriov_move_base_vector()
1014 static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) in ice_sriov_remap_vectors() argument
1023 ice_for_each_vf(pf, bkt, tmp_vf) { in ice_sriov_remap_vectors()
1030 ice_sriov_free_irqs(pf, tmp_vf); in ice_sriov_remap_vectors()
1037 tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]); in ice_sriov_remap_vectors()
1042 ice_sriov_get_irqs(pf, tmp_vf->num_msix); in ice_sriov_remap_vectors()
1068 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_set_msix_vec_count() local
1075 if (!ice_get_num_vfs(pf)) in ice_sriov_set_msix_vec_count()
1085 if (queues > min(ice_get_avail_txq_count(pf), in ice_sriov_set_msix_vec_count()
1086 ice_get_avail_rxq_count(pf))) in ice_sriov_set_msix_vec_count()
1101 vf = ice_get_vf_by_id(pf, id); in ice_sriov_set_msix_vec_count()
1113 if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) { in ice_sriov_set_msix_vec_count()
1119 ice_sriov_free_irqs(pf, vf); in ice_sriov_set_msix_vec_count()
1122 ice_sriov_remap_vectors(pf, vf->vf_id); in ice_sriov_set_msix_vec_count()
1126 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); in ice_sriov_set_msix_vec_count()
1136 dev_info(ice_pf_to_dev(pf), in ice_sriov_set_msix_vec_count()
1146 dev_info(ice_pf_to_dev(pf), in ice_sriov_set_msix_vec_count()
1152 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); in ice_sriov_set_msix_vec_count()
1178 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_configure() local
1179 struct device *dev = ice_pf_to_dev(pf); in ice_sriov_configure()
1182 err = ice_check_sriov_allowed(pf); in ice_sriov_configure()
1188 ice_free_vfs(pf); in ice_sriov_configure()
1196 err = ice_pci_sriov_ena(pf, num_vfs); in ice_sriov_configure()
1205 * @pf: pointer to the PF structure
1210 void ice_process_vflr_event(struct ice_pf *pf) in ice_process_vflr_event() argument
1212 struct ice_hw *hw = &pf->hw; in ice_process_vflr_event()
1217 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_process_vflr_event()
1218 !ice_has_vfs(pf)) in ice_process_vflr_event()
1221 mutex_lock(&pf->vfs.table_lock); in ice_process_vflr_event()
1222 ice_for_each_vf(pf, bkt, vf) { in ice_process_vflr_event()
1233 mutex_unlock(&pf->vfs.table_lock); in ice_process_vflr_event()
1237 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1238 * @pf: PF used to index all VFs
1239 * @pfq: queue index relative to the PF's function space
1248 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) in ice_get_vf_from_pfq() argument
1254 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_get_vf_from_pfq()
1280 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1281 * @pf: PF used for conversion
1282 * @globalq: global queue index used to convert to PF space queue index
1284 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) in ice_globalq_to_pfq() argument
1286 return globalq - pf->hw.func_caps.common_cap.rxq_first_id; in ice_globalq_to_pfq()
1291 * @pf: PF that the LAN overflow event happened on
1299 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_vf_lan_overflow_event() argument
1305 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); in ice_vf_lan_overflow_event()
1310 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); in ice_vf_lan_overflow_event()
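The overflow-event matches above chain two helpers: ice_globalq_to_pfq() rebases a global Rx queue index into the PF's function space by subtracting rxq_first_id, and ice_get_vf_from_pfq() walks the VFs to find the owner. The sketch below assumes each VF owns a contiguous PF-space queue range purely to stay self-contained; the driver checks each VF's VSI queue map instead:

#include <stdio.h>

struct vf {
	int id;
	int first_rxq;		/* first PF-space Rx queue owned by this VF */
	int num_rxq;
};

static int globalq_to_pfq(int globalq, int rxq_first_id)
{
	return globalq - rxq_first_id;
}

/* Assumed contiguous ranges; the driver walks each VF's VSI queue map. */
static const struct vf *vf_from_pfq(const struct vf *vfs, int nvfs, int pfq)
{
	for (int i = 0; i < nvfs; i++)
		if (pfq >= vfs[i].first_rxq &&
		    pfq < vfs[i].first_rxq + vfs[i].num_rxq)
			return &vfs[i];
	return NULL;		/* queue belongs to the PF itself */
}

int main(void)
{
	const struct vf vfs[] = { { 0, 64, 16 }, { 1, 80, 16 } };
	/* 2048 is a made-up rxq_first_id, 2129 a made-up global index */
	int pfq = globalq_to_pfq(2129, 2048);
	const struct vf *owner = vf_from_pfq(vfs, 2, pfq);

	printf("pfq %d owned by VF %d\n", pfq, owner ? owner->id : -1);
	return 0;
}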
1329 struct ice_pf *pf = np->vsi->back; in ice_set_vf_spoofchk() local
1335 dev = ice_pf_to_dev(pf); in ice_set_vf_spoofchk()
1337 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_spoofchk()
1389 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_cfg() local
1393 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_cfg()
1436 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_mac() local
1445 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_mac()
1462 /* VF is notified of its new MAC via the PF's response to the in ice_set_vf_mac()
1473 /* PF will add MAC rule for the VF */ in ice_set_vf_mac()
1497 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_trust() local
1501 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_trust()
1505 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_set_vf_trust()
1506 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); in ice_set_vf_trust()
1524 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", in ice_set_vf_trust()
1544 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_link_state() local
1548 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_link_state()
1582 * @pf: PF associated with VFs
1584 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) in ice_calc_all_vfs_min_tx_rate() argument
1591 ice_for_each_vf_rcu(pf, bkt, vf) in ice_calc_all_vfs_min_tx_rate()
1621 all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); in ice_min_tx_rate_oversubscribed()
1627 …dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d… in ice_min_tx_rate_oversubscribed()
1648 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_bw() local
1654 dev = ice_pf_to_dev(pf); in ice_set_vf_bw()
1656 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_bw()
1670 if (min_tx_rate && ice_is_dcb_active(pf)) { in ice_set_vf_bw()
1671 dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n"); in ice_set_vf_bw()
1710 * @netdev: the netdev of the PF
1717 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_stats() local
1723 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_stats()
1799 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_port_vlan() local
1805 dev = ice_pf_to_dev(pf); in ice_set_vf_port_vlan()
1813 if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { in ice_set_vf_port_vlan()
1819 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_port_vlan()
1860 struct ice_pf *pf = vf->pf; in ice_print_vf_rx_mdd_event() local
1863 dev = ice_pf_to_dev(pf); in ice_print_vf_rx_mdd_event()
1865 …dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-r… in ice_print_vf_rx_mdd_event()
1866 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_rx_mdd_event()
1868 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) in ice_print_vf_rx_mdd_event()
1874 * @pf: pointer to the PF structure
1878 void ice_print_vfs_mdd_events(struct ice_pf *pf) in ice_print_vfs_mdd_events() argument
1880 struct device *dev = ice_pf_to_dev(pf); in ice_print_vfs_mdd_events()
1881 struct ice_hw *hw = &pf->hw; in ice_print_vfs_mdd_events()
1886 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) in ice_print_vfs_mdd_events()
1890 if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) in ice_print_vfs_mdd_events()
1893 pf->vfs.last_printed_mdd_jiffies = jiffies; in ice_print_vfs_mdd_events()
1895 mutex_lock(&pf->vfs.table_lock); in ice_print_vfs_mdd_events()
1896 ice_for_each_vf(pf, bkt, vf) { in ice_print_vfs_mdd_events()
1909 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", in ice_print_vfs_mdd_events()
1914 mutex_unlock(&pf->vfs.table_lock); in ice_print_vfs_mdd_events()
1918 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
1919 * @pf: pointer to the PF structure
1921 * Called when recovering from a PF FLR to restore interrupt capability to
1924 void ice_restore_all_vfs_msi_state(struct ice_pf *pf) in ice_restore_all_vfs_msi_state() argument
1929 ice_for_each_vf(pf, bkt, vf) in ice_restore_all_vfs_msi_state()