Lines matching full-text search "pf"

19  * @pf: pointer to the PF structure
24 static void ice_free_vf_entries(struct ice_pf *pf) in ice_free_vf_entries() argument
26 struct ice_vfs *vfs = &pf->vfs; in ice_free_vf_entries()
50 struct ice_pf *pf = vf->pf; in ice_free_vf_res() local
76 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); in ice_free_vf_res()
77 ice_flush(&pf->hw); in ice_free_vf_res()
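
The two matches above are the tail of ice_free_vf_res(): pending interrupt causes are acknowledged and the posted writes flushed before the VF's vectors go back to the pool. A minimal sketch of that pattern, assuming the vector range is tracked by vf->first_vector_idx and pf->vfs.num_msix_per:

	/* Sketch: ack any pending cause on every vector the VF owned, then
	 * flush so the posted register writes reach hardware before the
	 * vectors are reused.
	 */
	int first = vf->first_vector_idx;
	int last = first + pf->vfs.num_msix_per - 1;
	int i;

	for (i = first; i <= last; i++)
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
	ice_flush(&pf->hw);
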
90 struct ice_pf *pf = vf->pf; in ice_dis_vf_mappings() local
96 hw = &pf->hw; in ice_dis_vf_mappings()
101 dev = ice_pf_to_dev(pf); in ice_dis_vf_mappings()
128 * @pf: pointer to the PF structure
130 void ice_free_vfs(struct ice_pf *pf) in ice_free_vfs() argument
132 struct device *dev = ice_pf_to_dev(pf); in ice_free_vfs()
133 struct ice_vfs *vfs = &pf->vfs; in ice_free_vfs()
134 struct ice_hw *hw = &pf->hw; in ice_free_vfs()
138 if (!ice_has_vfs(pf)) in ice_free_vfs()
141 while (test_and_set_bit(ICE_VF_DIS, pf->state)) in ice_free_vfs()
148 if (!pci_vfs_assigned(pf->pdev)) in ice_free_vfs()
149 pci_disable_sriov(pf->pdev); in ice_free_vfs()
155 ice_for_each_vf(pf, bkt, vf) { in ice_free_vfs()
158 ice_eswitch_detach_vf(pf, vf); in ice_free_vfs()
160 ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix); in ice_free_vfs()
169 if (!pci_vfs_assigned(pf->pdev)) { in ice_free_vfs()
181 ice_free_vf_entries(pf); in ice_free_vfs()
185 clear_bit(ICE_VF_DIS, pf->state); in ice_free_vfs()
186 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_free_vfs()
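
Read together, the ice_free_vfs() matches give the teardown order: serialize on the ICE_VF_DIS bit, disable SR-IOV at the PCI layer only if no VF is assigned to a guest, release each VF's eswitch and IRQ resources under the table lock, then free the entries and clear the state bits. A condensed sketch of that flow, assuming the helpers named in the matches (per-VF VSI cleanup abbreviated):

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);	/* wait out a concurrent disable */

	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);

	mutex_lock(&vfs->table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		ice_eswitch_detach_vf(pf, vf);
		ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
		/* per-VF VSI release and register cleanup elided */
	}
	ice_free_vf_entries(pf);
	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
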
199 struct ice_pf *pf = vf->pf; in ice_vf_vsi_setup() local
207 vsi = ice_vsi_setup(pf, &params); in ice_vf_vsi_setup()
210 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); in ice_vf_vsi_setup()
226 * device values and other registers need 0-based values, which represent PF
233 struct ice_pf *pf = vf->pf; in ice_ena_vf_msix_mappings() local
238 hw = &pf->hw; in ice_ena_vf_msix_mappings()
243 pf->hw.func_caps.common_cap.msix_vector_first_id; in ice_ena_vf_msix_mappings()
277 struct device *dev = ice_pf_to_dev(vf->pf); in ice_ena_vf_q_mappings()
279 struct ice_hw *hw = &vf->pf->hw; in ice_ena_vf_q_mappings()
290 /* set the VF PF Tx queue range in ice_ena_vf_q_mappings()
306 /* set the VF PF Rx queue range in ice_ena_vf_q_mappings()
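
The "VF PF Tx/Rx queue range" comments mark where ice_ena_vf_q_mappings() publishes the VF's contiguous PF-space queue ranges to hardware. A sketch of the Tx side, assuming FIELD_PREP-style register helpers (the Rx side mirrors it with VPLAN_RX_QBASE):

	/* Sketch: VFFIRSTQ carries the first PF-space queue handed to the
	 * VF; VFNUMQ encodes how many queues follow it contiguously.
	 */
	reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
	      FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
	wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
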
334 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
350 * @pf: pointer to the PF structure
367 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
370 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) in ice_set_per_vf_res() argument
374 struct device *dev = ice_pf_to_dev(pf); in ice_set_per_vf_res()
376 lockdep_assert_held(&pf->vfs.table_lock); in ice_set_per_vf_res()
382 msix_avail_for_sriov = pf->virt_irq_tracker.num_entries; in ice_set_per_vf_res()
401 avail_qs = ice_get_avail_txq_count(pf) / num_vfs; in ice_set_per_vf_res()
409 avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; in ice_set_per_vf_res()
422 pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); in ice_set_per_vf_res()
423 pf->vfs.num_msix_per = num_msix_per_vf; in ice_set_per_vf_res()
425 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); in ice_set_per_vf_res()
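
The ice_set_per_vf_res() matches show the budget arithmetic: the SR-IOV MSI-X pool and the free Tx/Rx queue counts are each divided by num_vfs, and the smaller of the two queue figures becomes the per-VF queue-pair count. The real function also selects among tiered vector sizes; the sketch below only illustrates the divide-and-clamp idea, with illustrative numbers (not from the source) in the comments:

	/* e.g. 256 tracker entries / 8 VFs = 32 vectors per VF; one vector
	 * stays reserved for non-queue use, and the queue count is further
	 * clamped by what the shared Tx and Rx pools can supply.
	 */
	msix_avail_per_vf = pf->virt_irq_tracker.num_entries / num_vfs;
	num_txq = min_t(u16, msix_avail_per_vf - 1,
			ice_get_avail_txq_count(pf) / num_vfs);
	num_rxq = min_t(u16, msix_avail_per_vf - 1,
			ice_get_avail_rxq_count(pf) / num_vfs);
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
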
439 struct ice_pf *pf = vf->pf; in ice_init_vf_vsi_res() local
443 vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); in ice_init_vf_vsi_res()
464 * @pf: PF the VFs are associated with
466 static int ice_start_vfs(struct ice_pf *pf) in ice_start_vfs() argument
468 struct ice_hw *hw = &pf->hw; in ice_start_vfs()
473 lockdep_assert_held(&pf->vfs.table_lock); in ice_start_vfs()
476 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
481 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", in ice_start_vfs()
486 retval = ice_eswitch_attach_vf(pf, vf); in ice_start_vfs()
488 dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", in ice_start_vfs()
504 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
536 struct ice_hw *hw = &vf->pf->hw; in ice_sriov_clear_reset_state()
551 struct ice_pf *pf = vf->pf; in ice_sriov_clear_mbx_register() local
553 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
554 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
566 struct ice_pf *pf = vf->pf; in ice_sriov_trigger_reset_register() local
572 dev = ice_pf_to_dev(pf); in ice_sriov_trigger_reset_register()
573 hw = &pf->hw; in ice_sriov_trigger_reset_register()
613 struct ice_pf *pf = vf->pf; in ice_sriov_poll_reset_status() local
622 reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); in ice_sriov_poll_reset_status()
638 struct ice_hw *hw = &vf->pf->hw; in ice_sriov_clear_reset_trigger()
654 wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); in ice_sriov_post_vsi_rebuild()
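
The ice_sriov_clear_mbx_register() through ice_sriov_post_vsi_rebuild() matches outline the per-VF reset handshake: quiesce the mailbox, pulse the software reset trigger, poll for completion, clear the trigger, and finally advertise the VF as active again. A sketch of that sequence, assuming the VPGEN_* register names shown and illustrative poll timing:

	wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);	/* quiesce the mailbox */
	wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));	/* pulse VFR */
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg | VPGEN_VFRTRIG_VFSWR_M);

	for (i = 0; i < 10; i++) {	/* poll for reset-done */
		if (rd32(hw, VPGEN_VFRSTAT(vf->vf_id)) & VPGEN_VFRSTAT_VFRD_M)
			break;
		usleep_range(10, 20);
	}

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));	/* clear the trigger */
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg & ~VPGEN_VFRTRIG_VFSWR_M);

	/* tell the VF driver it can talk to the PF again */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
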
671 * @pf: pointer to the PF structure
682 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) in ice_create_vf_entries() argument
684 struct pci_dev *pdev = pf->pdev; in ice_create_vf_entries()
685 struct ice_vfs *vfs = &pf->vfs; in ice_create_vf_entries()
704 vf->pf = pf; in ice_create_vf_entries()
716 vf->vf_sw_id = pf->first_sw; in ice_create_vf_entries()
732 ice_free_vf_entries(pf); in ice_create_vf_entries()
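
ice_create_vf_entries() allocates one ice_vf per requested VF and publishes it in the VF hash table keyed by VF ID; any failure unwinds through ice_free_vf_entries(). A sketch of the per-entry setup, assuming the refcounted hash-table scheme implied by the ice_for_each_vf()/ice_get_vf_by_id() matches:

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf) {
		err = -ENOMEM;
		goto err_free_entries;
	}
	kref_init(&vf->refcnt);		/* dropped via ice_put_vf() */
	vf->pf = pf;
	vf->vf_id = vf_id;
	vf->vf_sw_id = pf->first_sw;

	hash_add_rcu(vfs->table, &vf->entry, vf->vf_id);
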
738 * @pf: pointer to the PF structure
741 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) in ice_ena_vfs() argument
743 struct device *dev = ice_pf_to_dev(pf); in ice_ena_vfs()
744 struct ice_hw *hw = &pf->hw; in ice_ena_vfs()
748 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), in ice_ena_vfs()
750 set_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
753 ret = pci_enable_sriov(pf->pdev, num_vfs); in ice_ena_vfs()
757 mutex_lock(&pf->vfs.table_lock); in ice_ena_vfs()
759 ret = ice_set_per_vf_res(pf, num_vfs); in ice_ena_vfs()
766 ret = ice_create_vf_entries(pf, num_vfs); in ice_ena_vfs()
773 ret = ice_start_vfs(pf); in ice_ena_vfs()
780 clear_bit(ICE_VF_DIS, pf->state); in ice_ena_vfs()
783 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) in ice_ena_vfs()
786 mutex_unlock(&pf->vfs.table_lock); in ice_ena_vfs()
791 ice_free_vf_entries(pf); in ice_ena_vfs()
793 mutex_unlock(&pf->vfs.table_lock); in ice_ena_vfs()
794 pci_disable_sriov(pf->pdev); in ice_ena_vfs()
798 clear_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
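
The ice_ena_vfs() matches trace bring-up and unwind: the miscellaneous interrupt cause is masked first (ICE_OICR_INTR_DIS) so stray VFLR events don't fire mid-setup, and each later failure rolls back exactly the steps that preceded it. A condensed sketch of the goto ladder, assuming the helpers shown above:

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret)
		goto err_unroll_sriov;

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret)
		goto err_unroll_sriov;

	ret = ice_start_vfs(pf);
	if (ret) {
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);
	mutex_unlock(&pf->vfs.table_lock);
	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
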
804 * @pf: pointer to the PF structure
809 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) in ice_pci_sriov_ena() argument
811 struct device *dev = ice_pf_to_dev(pf); in ice_pci_sriov_ena()
815 ice_free_vfs(pf); in ice_pci_sriov_ena()
819 if (num_vfs > pf->vfs.num_supported) { in ice_pci_sriov_ena()
821 num_vfs, pf->vfs.num_supported); in ice_pci_sriov_ena()
826 err = ice_ena_vfs(pf, num_vfs); in ice_pci_sriov_ena()
832 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_pci_sriov_ena()
838  * @pf: PF to enable SR-IOV on in ice_check_sriov_allowed()
840 static int ice_check_sriov_allowed(struct ice_pf *pf) in ice_check_sriov_allowed() argument
842 struct device *dev = ice_pf_to_dev(pf); in ice_check_sriov_allowed()
844 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { in ice_check_sriov_allowed()
849 if (ice_is_safe_mode(pf)) { in ice_check_sriov_allowed()
854 if (!ice_pf_state_is_nominal(pf)) { in ice_check_sriov_allowed()
870 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_get_vf_total_msix() local
872 return pf->virt_irq_tracker.num_entries; in ice_sriov_get_vf_total_msix()
875 static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) in ice_sriov_remap_vectors() argument
884 ice_for_each_vf(pf, bkt, tmp_vf) { in ice_sriov_remap_vectors()
891 ice_virt_free_irqs(pf, tmp_vf->first_vector_idx, in ice_sriov_remap_vectors()
899 tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]); in ice_sriov_remap_vectors()
904 ice_virt_get_irqs(pf, tmp_vf->num_msix); in ice_sriov_remap_vectors()
930 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_set_msix_vec_count() local
937 if (!ice_get_num_vfs(pf)) in ice_sriov_set_msix_vec_count()
947 if (queues > min(ice_get_avail_txq_count(pf), in ice_sriov_set_msix_vec_count()
948 ice_get_avail_rxq_count(pf))) in ice_sriov_set_msix_vec_count()
963 vf = ice_get_vf_by_id(pf, id); in ice_sriov_set_msix_vec_count()
978 ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix); in ice_sriov_set_msix_vec_count()
981 ice_sriov_remap_vectors(pf, vf->vf_id); in ice_sriov_set_msix_vec_count()
985 vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); in ice_sriov_set_msix_vec_count()
998 dev_info(ice_pf_to_dev(pf), in ice_sriov_set_msix_vec_count()
1008 dev_info(ice_pf_to_dev(pf), in ice_sriov_set_msix_vec_count()
1015 vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); in ice_sriov_set_msix_vec_count()
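
ice_sriov_set_msix_vec_count() resizes a single VF's vector allocation. Per the matches: free the VF's current IRQ range, compact the other VFs' ranges with ice_sriov_remap_vectors() so the freed hole closes, then acquire a range of the new size; the second ice_virt_get_irqs() match is the fallback that restores the previous sizing on failure. A sketch of that core, with the reset/rebuild steps abbreviated:

	prev_msix = vf->num_msix;
	prev_queues = vf->num_queues;

	ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
	ice_sriov_remap_vectors(pf, vf->vf_id);	/* close the hole */

	vf->num_msix = msix_vec_count;
	vf->num_queues = queues;
	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		goto unroll;
	/* VF reset and VSI rebuild elided */
	return 0;

unroll:
	vf->num_msix = prev_msix;
	vf->num_queues = prev_queues;
	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
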
1045 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_configure() local
1046 struct device *dev = ice_pf_to_dev(pf); in ice_sriov_configure()
1049 err = ice_check_sriov_allowed(pf); in ice_sriov_configure()
1055 ice_free_vfs(pf); in ice_sriov_configure()
1063 err = ice_pci_sriov_ena(pf, num_vfs); in ice_sriov_configure()
1072 * @pf: pointer to the PF structure
1077 void ice_process_vflr_event(struct ice_pf *pf) in ice_process_vflr_event() argument
1079 struct ice_hw *hw = &pf->hw; in ice_process_vflr_event()
1084 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_process_vflr_event()
1085 !ice_has_vfs(pf)) in ice_process_vflr_event()
1088 mutex_lock(&pf->vfs.table_lock); in ice_process_vflr_event()
1089 ice_for_each_vf(pf, bkt, vf) { in ice_process_vflr_event()
1100 mutex_unlock(&pf->vfs.table_lock); in ice_process_vflr_event()
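
ice_process_vflr_event() runs from the service task once ICE_VFLR_EVENT_PENDING is observed: under the table lock it tests each VF's bit in GLGEN_VFLRSTAT and resets the VFs that underwent FLR. A sketch of the per-VF check, assuming the usual 32-bits-per-register status layout:

	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		u32 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;

		/* the status bit itself is cleared during the VF reset */
		if (rd32(hw, GLGEN_VFLRSTAT(reg_idx)) & BIT(bit_idx))
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
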
1104 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1105 * @pf: PF used to index all VFs
1106 * @pfq: queue index relative to the PF's function space
1115 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) in ice_get_vf_from_pfq() argument
1121 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_get_vf_from_pfq()
1147 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1148 * @pf: PF used for conversion
1149 * @globalq: global queue index used to convert to PF space queue index
1151 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) in ice_globalq_to_pfq() argument
1153 return globalq - pf->hw.func_caps.common_cap.rxq_first_id; in ice_globalq_to_pfq()
1158 * @pf: PF that the LAN overflow event happened on
1166 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_vf_lan_overflow_event() argument
1172 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); in ice_vf_lan_overflow_event()
1177 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); in ice_vf_lan_overflow_event()
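
The overflow handler maps the event's global Rx queue back to its owner in two steps: subtract rxq_first_id to get a PF-space index (e.g. with rxq_first_id = 64, global queue 71 becomes PF queue 7), then scan every VF's Rx queue map under RCU. A sketch of the lookup, assuming each VF VSI records its PF-space queues in rxq_map[] and that callers release the VF with ice_put_vf():

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi = ice_get_vf_vsi(vf);
		u16 rxq_idx;

		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				/* only hand out a VF we could reference */
				found = kref_get_unless_zero(&vf->refcnt) ?
					vf : NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();
	return NULL;
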
1196 struct ice_pf *pf = np->vsi->back; in ice_set_vf_spoofchk() local
1202 dev = ice_pf_to_dev(pf); in ice_set_vf_spoofchk()
1204 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_spoofchk()
1256 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_cfg() local
1260 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_cfg()
1295 * @pf: PF to be configured in __ice_set_vf_mac()
1302 int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac) in __ice_set_vf_mac() argument
1308 dev = ice_pf_to_dev(pf); in __ice_set_vf_mac()
1314 vf = ice_get_vf_by_id(pf, vf_id); in __ice_set_vf_mac()
1331 /* VF is notified of its new MAC via the PF's response to the in __ice_set_vf_mac()
1342 /* PF will add MAC rule for the VF */ in __ice_set_vf_mac()
1380 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_trust() local
1384 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_trust()
1388 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_set_vf_trust()
1389 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); in ice_set_vf_trust()
1407 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", in ice_set_vf_trust()
1427 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_link_state() local
1431 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_link_state()
1465 * @pf: PF associated with VFs
1467 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) in ice_calc_all_vfs_min_tx_rate() argument
1474 ice_for_each_vf_rcu(pf, bkt, vf) in ice_calc_all_vfs_min_tx_rate()
1504 all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); in ice_min_tx_rate_oversubscribed()
1510 …dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d… in ice_min_tx_rate_oversubscribed()
1531 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_bw() local
1537 dev = ice_pf_to_dev(pf); in ice_set_vf_bw()
1539 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_bw()
1553 if (min_tx_rate && ice_is_dcb_active(pf)) { in ice_set_vf_bw()
1554 dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n"); in ice_set_vf_bw()
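
The min Tx rate matches encode two guards: min-rate limiting is refused while DCB is active on the PF, and the sum of all VFs' guaranteed minimums may never exceed the link speed. A sketch of the oversubscription check, assuming rates in Mbps and the ice_get_link_speed_mbps() helper:

	link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's old guarantee is about to be replaced, so drop it
	 * from the running total before testing the requested one
	 */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps)
		return true;	/* would oversubscribe the link */
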
1593 * @netdev: the netdev of the PF
1600 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_stats() local
1606 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_stats()
1682 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_port_vlan() local
1688 dev = ice_pf_to_dev(pf); in ice_set_vf_port_vlan()
1696 if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { in ice_set_vf_port_vlan()
1702 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_port_vlan()
1743 struct ice_pf *pf = vf->pf; in ice_print_vf_rx_mdd_event() local
1746 dev = ice_pf_to_dev(pf); in ice_print_vf_rx_mdd_event()
1748 …dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-r… in ice_print_vf_rx_mdd_event()
1749 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_rx_mdd_event()
1751 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) in ice_print_vf_rx_mdd_event()
1761 struct ice_pf *pf = vf->pf; in ice_print_vf_tx_mdd_event() local
1764 dev = ice_pf_to_dev(pf); in ice_print_vf_tx_mdd_event()
1766 …dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-r… in ice_print_vf_tx_mdd_event()
1767 vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_tx_mdd_event()
1769 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) in ice_print_vf_tx_mdd_event()
1775 * @pf: pointer to the PF structure
1779 void ice_print_vfs_mdd_events(struct ice_pf *pf) in ice_print_vfs_mdd_events() argument
1785 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) in ice_print_vfs_mdd_events()
1789 if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) in ice_print_vfs_mdd_events()
1792 pf->vfs.last_printed_mdd_jiffies = jiffies; in ice_print_vfs_mdd_events()
1794 mutex_lock(&pf->vfs.table_lock); in ice_print_vfs_mdd_events()
1795 ice_for_each_vf(pf, bkt, vf) { in ice_print_vfs_mdd_events()
1810 mutex_unlock(&pf->vfs.table_lock); in ice_print_vfs_mdd_events()
1814 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
1815 * @pf: pointer to the PF structure
1817 * Called when recovering from a PF FLR to restore interrupt capability to
1820 void ice_restore_all_vfs_msi_state(struct ice_pf *pf) in ice_restore_all_vfs_msi_state() argument
1825 ice_for_each_vf(pf, bkt, vf) in ice_restore_all_vfs_msi_state()
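
After a PF-level FLR, the MSI-X configuration in each VF's PCI config space is stale. The final match suggests a simple per-VF restore; a minimal sketch, assuming each VF's pci_dev was cached in vf->vfdev when the VF was created:

void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
{
	struct ice_vf *vf;
	u32 bkt;

	ice_for_each_vf(pf, bkt, vf)
		pci_restore_msi_state(vf->vfdev);
}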