Lines Matching full:pf (identifier search hits for pf in the ice driver, drivers/net/ethernet/intel/ice/ice_main.c; the leading number on each hit is the source line in that file)
65 struct ice_pf *pf = container_of(hw, struct ice_pf, hw); in ice_hw_to_dev() local
67 return &pf->pdev->dev; in ice_hw_to_dev()
75 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
77 static void ice_vsi_release_all(struct ice_pf *pf);
79 static int ice_rebuild_channels(struct ice_pf *pf);
113 * @pf: pointer to PF struct
115 static void ice_check_for_hang_subtask(struct ice_pf *pf) in ice_check_for_hang_subtask() argument
123 ice_for_each_vsi(pf, v) in ice_check_for_hang_subtask()
124 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
125 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
177 * @pf: board private structure
179 * Set initial set of MAC filters for PF VSI; configure filters for permanent
183 static int ice_init_mac_fltr(struct ice_pf *pf) in ice_init_mac_fltr() argument
188 vsi = ice_get_main_vsi(pf); in ice_init_mac_fltr()
261 * ice_set_promisc - Enable promiscuous mode for a given PF
290 * ice_clear_promisc - Disable promiscuous mode for a given PF
328 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr() local
329 struct ice_hw *hw = &pf->hw; in ice_vsi_sync_fltr()
483 * @pf: board private structure
485 static void ice_sync_fltr_subtask(struct ice_pf *pf) in ice_sync_fltr_subtask() argument
489 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) in ice_sync_fltr_subtask()
492 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
494 ice_for_each_vsi(pf, v) in ice_sync_fltr_subtask()
495 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
496 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
498 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
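
The ice_sync_fltr_subtask() hits above show a retry-by-flag idiom: ICE_FLAG_FLTR_SYNC is cleared before the VSIs are walked and set again if any sync fails, so the next service-task pass retries. A minimal userspace C sketch of that idiom (C11 atomics stand in for the kernel bitops; sync_one() and NUM_VSI are invented stand-ins, not driver names):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_VSI 4

static atomic_bool fltr_sync_pending = true;

static bool sync_one(int v)
{
	return v != 2;	/* pretend VSI 2 fails on this pass */
}

static void sync_fltr_subtask(void)
{
	/* test-and-clear: bail out if no sync was requested */
	if (!atomic_exchange(&fltr_sync_pending, false))
		return;

	for (int v = 0; v < NUM_VSI; v++)
		if (!sync_one(v)) {
			/* re-arm so the next service-task pass retries */
			atomic_store(&fltr_sync_pending, true);
			break;
		}
}

int main(void)
{
	sync_fltr_subtask();
	printf("pending after pass 1: %d\n",
	       atomic_load(&fltr_sync_pending));	/* 1: retry armed */
	return 0;
}
```
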
504 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
505 * @pf: the PF
508 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) in ice_pf_dis_all_vsi() argument
513 ice_for_each_vsi(pf, v) in ice_pf_dis_all_vsi()
514 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
515 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
518 pf->pf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
521 pf->vf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
526 * @pf: board private structure
532 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_prepare_for_reset() argument
534 struct ice_hw *hw = &pf->hw; in ice_prepare_for_reset()
539 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); in ice_prepare_for_reset()
542 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) in ice_prepare_for_reset()
545 synchronize_irq(pf->oicr_irq.virq); in ice_prepare_for_reset()
547 ice_unplug_aux_dev(pf); in ice_prepare_for_reset()
551 ice_vc_notify_reset(pf); in ice_prepare_for_reset()
554 mutex_lock(&pf->vfs.table_lock); in ice_prepare_for_reset()
555 ice_for_each_vf(pf, bkt, vf) in ice_prepare_for_reset()
557 mutex_unlock(&pf->vfs.table_lock); in ice_prepare_for_reset()
559 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_prepare_for_reset()
561 ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge); in ice_prepare_for_reset()
566 vsi = ice_get_main_vsi(pf); in ice_prepare_for_reset()
575 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_prepare_for_reset()
591 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_prepare_for_reset()
603 set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state); in ice_prepare_for_reset()
604 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_reset()
606 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_prepare_for_reset()
607 ice_ptp_prepare_for_reset(pf, reset_type); in ice_prepare_for_reset()
609 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_prepare_for_reset()
610 ice_gnss_exit(pf); in ice_prepare_for_reset()
617 set_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_prepare_for_reset()
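
ice_prepare_for_reset() brackets its teardown with ICE_PREPARED_FOR_RESET: the early test_bit bails out if another path already prepared, and set_bit runs only once the teardown is complete. A hedged userspace sketch of that guard, reachable here from both the interrupt-notified path (ice_reset_subtask) and a software-requested reset (ice_do_reset); teardown() is a placeholder:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool prepared_for_reset;

static void teardown(void)
{
	puts("quiescing VSIs (runs once)");
}

static void prepare_for_reset(void)
{
	if (atomic_load(&prepared_for_reset))
		return;			/* already prepared by the other path */

	teardown();

	/* set last, only after the teardown fully completed */
	atomic_store(&prepared_for_reset, true);
}

int main(void)
{
	prepare_for_reset();		/* does the work */
	prepare_for_reset();		/* no-op thanks to the guard bit */
	return 0;
}
```
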
622 * @pf: board private structure
625 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_do_reset() argument
627 struct device *dev = ice_pf_to_dev(pf); in ice_do_reset()
628 struct ice_hw *hw = &pf->hw; in ice_do_reset()
632 if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) { in ice_do_reset()
637 ice_prepare_for_reset(pf, reset_type); in ice_do_reset()
642 set_bit(ICE_RESET_FAILED, pf->state); in ice_do_reset()
643 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_do_reset()
644 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
645 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
646 clear_bit(ICE_CORER_REQ, pf->state); in ice_do_reset()
647 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_do_reset()
648 wake_up(&pf->reset_wait_queue); in ice_do_reset()
657 pf->pfr_count++; in ice_do_reset()
658 ice_rebuild(pf, reset_type); in ice_do_reset()
659 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
660 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
661 wake_up(&pf->reset_wait_queue); in ice_do_reset()
662 ice_reset_all_vfs(pf); in ice_do_reset()
668 * @pf: board private structure
670 static void ice_reset_subtask(struct ice_pf *pf) in ice_reset_subtask() argument
676 * of reset is pending and sets bits in pf->state indicating the reset in ice_reset_subtask()
678 * prepare for pending reset if not already (for PF software-initiated in ice_reset_subtask()
684 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_reset_subtask()
686 if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) in ice_reset_subtask()
688 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) in ice_reset_subtask()
690 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) in ice_reset_subtask()
695 ice_prepare_for_reset(pf, reset_type); in ice_reset_subtask()
698 if (ice_check_reset(&pf->hw)) { in ice_reset_subtask()
699 set_bit(ICE_RESET_FAILED, pf->state); in ice_reset_subtask()
702 pf->hw.reset_ongoing = false; in ice_reset_subtask()
703 ice_rebuild(pf, reset_type); in ice_reset_subtask()
707 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_reset_subtask()
708 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_reset_subtask()
709 clear_bit(ICE_PFR_REQ, pf->state); in ice_reset_subtask()
710 clear_bit(ICE_CORER_REQ, pf->state); in ice_reset_subtask()
711 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_reset_subtask()
712 wake_up(&pf->reset_wait_queue); in ice_reset_subtask()
713 ice_reset_all_vfs(pf); in ice_reset_subtask()
720 if (test_bit(ICE_PFR_REQ, pf->state)) { in ice_reset_subtask()
722 if (pf->lag && pf->lag->bonded) { in ice_reset_subtask()
723 dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n"); in ice_reset_subtask()
727 if (test_bit(ICE_CORER_REQ, pf->state)) in ice_reset_subtask()
729 if (test_bit(ICE_GLOBR_REQ, pf->state)) in ice_reset_subtask()
736 if (!test_bit(ICE_DOWN, pf->state) && in ice_reset_subtask()
737 !test_bit(ICE_CFG_BUSY, pf->state)) { in ice_reset_subtask()
738 ice_do_reset(pf, reset_type); in ice_reset_subtask()
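
ice_reset_subtask() resolves the request bits into a single reset type, escalating PFR to CORER on a bonded interface (per the dev_dbg above) and letting GLOBR outrank CORER outrank PFR. A small C sketch of that priority resolution (bond_active is an invented stand-in for pf->lag && pf->lag->bonded):

```c
#include <stdbool.h>
#include <stdio.h>

enum reset_req { RESET_INVAL, RESET_PFR, RESET_CORER, RESET_GLOBR };

static enum reset_req pick_reset(bool pfr_req, bool corer_req,
				 bool globr_req, bool bond_active)
{
	enum reset_req type = RESET_INVAL;

	if (pfr_req)
		/* a PF-only reset would desync a bonded pair; escalate */
		type = bond_active ? RESET_CORER : RESET_PFR;
	if (corer_req)
		type = RESET_CORER;
	if (globr_req)
		type = RESET_GLOBR;	/* most severe request wins */

	return type;
}

int main(void)
{
	/* PFR requested while bonded: promoted to CORER (2) */
	printf("%d\n", pick_reset(true, false, false, true));
	return 0;
}
```
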
934 * @pf: private PF struct
943 static void ice_set_dflt_mib(struct ice_pf *pf) in ice_set_dflt_mib() argument
945 struct device *dev = ice_pf_to_dev(pf); in ice_set_dflt_mib()
949 struct ice_hw *hw = &pf->hw; in ice_set_dflt_mib()
1022 * @pf: pointer to PF struct
1027 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) in ice_check_phy_fw_load() argument
1030 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1034 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) in ice_check_phy_fw_load()
1038 …dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and… in ice_check_phy_fw_load()
1039 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1045 * @pf: pointer to PF struct
1051 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) in ice_check_module_power() argument
1056 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1063 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) in ice_check_module_power()
1067 …dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cann… in ice_check_module_power()
1068 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1070 …dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cann… in ice_check_module_power()
1071 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1077 * @pf: pointer to the PF struct
1083 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) in ice_check_link_cfg_err() argument
1085 ice_check_module_power(pf, link_cfg_err); in ice_check_link_cfg_err()
1086 ice_check_phy_fw_load(pf, link_cfg_err); in ice_check_link_cfg_err()
1091 * @pf: PF that the link event is associated with
1099 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, in ice_link_event() argument
1102 struct device *dev = ice_pf_to_dev(pf); in ice_link_event()
1124 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_link_event()
1132 vsi = ice_get_main_vsi(pf); in ice_link_event()
1137 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && in ice_link_event()
1139 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_link_event()
1147 ice_ptp_link_change(pf, link_up); in ice_link_event()
1149 if (ice_is_dcb_active(pf)) { in ice_link_event()
1150 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_link_event()
1151 ice_dcb_rebuild(pf); in ice_link_event()
1154 ice_set_dflt_mib(pf); in ice_link_event()
1159 ice_vc_notify_link_state(pf); in ice_link_event()
1166 * @pf: board private structure
1168 static void ice_watchdog_subtask(struct ice_pf *pf) in ice_watchdog_subtask() argument
1173 if (test_bit(ICE_DOWN, pf->state) || in ice_watchdog_subtask()
1174 test_bit(ICE_CFG_BUSY, pf->state)) in ice_watchdog_subtask()
1179 pf->serv_tmr_prev + pf->serv_tmr_period)) in ice_watchdog_subtask()
1182 pf->serv_tmr_prev = jiffies; in ice_watchdog_subtask()
1187 ice_update_pf_stats(pf); in ice_watchdog_subtask()
1188 ice_for_each_vsi(pf, i) in ice_watchdog_subtask()
1189 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1190 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
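
ice_watchdog_subtask() is called on every service-task pass but throttles itself: it returns early unless a full serv_tmr_period has elapsed since serv_tmr_prev, then stamps the new time and updates statistics. A userspace sketch of that rate limit (CLOCK_MONOTONIC stands in for jiffies; PERIOD_NS and update_stats() are placeholders):

```c
#include <stdio.h>
#include <time.h>

#define PERIOD_NS 1000000000LL		/* one second, like ~HZ */

static long long prev_ns;

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void update_stats(void)
{
	puts("stats updated");
}

static void watchdog_subtask(void)
{
	long long now = now_ns();

	if (now < prev_ns + PERIOD_NS)
		return;			/* ran too recently; skip this pass */
	prev_ns = now;

	update_stats();
}

int main(void)
{
	watchdog_subtask();		/* runs */
	watchdog_subtask();		/* skipped: same period */
	return 0;
}
```
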
1224 * @pf: PF that the link event is associated with
1228 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_handle_link_event() argument
1235 port_info = pf->hw.port_info; in ice_handle_link_event()
1239 status = ice_link_event(pf, port_info, in ice_handle_link_event()
1243 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", in ice_handle_link_event()
1251 * @pf: PF that the FW log event is associated with
1255 ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_get_fwlog_data() argument
1258 struct ice_hw *hw = &pf->hw; in ice_get_fwlog_data()
1277 * @pf: pointer to the PF private structure
1282 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
1291 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, in ice_aq_prep_for_event() argument
1298 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1299 hlist_add_head(&task->entry, &pf->aq_wait_list); in ice_aq_prep_for_event()
1300 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1305 * @pf: pointer to the PF private structure
1309 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1315 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, in ice_aq_wait_for_event() argument
1319 struct device *dev = ice_pf_to_dev(pf); in ice_aq_wait_for_event()
1324 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, in ice_aq_wait_for_event()
1352 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1354 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1361 * @pf: pointer to the PF private structure
1377 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, in ice_aq_check_events() argument
1384 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1385 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { in ice_aq_check_events()
1405 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1408 wake_up(&pf->aq_wait_queue); in ice_aq_check_events()
1413 * @pf: the PF private structure
1418 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) in ice_aq_cancel_waiting_tasks() argument
1422 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1423 hlist_for_each_entry(task, &pf->aq_wait_list, entry) in ice_aq_cancel_waiting_tasks()
1425 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1427 wake_up(&pf->aq_wait_queue); in ice_aq_cancel_waiting_tasks()
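
The ice_aq_prep_for_event()/ice_aq_wait_for_event() pair splits waiting into two steps: the task is hooked onto aq_wait_list under aq_wait_lock first and the actual sleep happens later, so a completion processed in between is still observed; ice_aq_check_events() marks matching tasks and wakes the queue, and ice_aq_cancel_waiting_tasks() wakes everyone on teardown. A userspace sketch of the pattern (a pthread condvar replaces the kernel wait queue; the single-slot "pending" pointer and the opcode value are illustrative):

```c
#include <pthread.h>
#include <stdio.h>

enum aq_state { AQ_WAITING, AQ_COMPLETED, AQ_CANCELED };

struct aq_task {
	unsigned short opcode;		/* completion being waited for */
	enum aq_state state;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct aq_task *pending;

static void prep_for_event(struct aq_task *task, unsigned short opcode)
{
	pthread_mutex_lock(&lock);
	task->opcode = opcode;
	task->state = AQ_WAITING;
	pending = task;		/* registered before the command is sent */
	pthread_mutex_unlock(&lock);
}

/* queue-cleaner side, cf. ice_aq_check_events() */
static void check_events(unsigned short opcode)
{
	pthread_mutex_lock(&lock);
	if (pending && pending->state == AQ_WAITING &&
	    pending->opcode == opcode) {
		pending->state = AQ_COMPLETED;
		pthread_cond_broadcast(&cond);
	}
	pthread_mutex_unlock(&lock);
}

static enum aq_state wait_for_event(struct aq_task *task)
{
	pthread_mutex_lock(&lock);
	while (task->state == AQ_WAITING)	/* kernel adds a timeout here */
		pthread_cond_wait(&cond, &lock);
	pending = NULL;				/* unhook when done */
	pthread_mutex_unlock(&lock);
	return task->state;
}

int main(void)
{
	struct aq_task task;

	prep_for_event(&task, 0x0605);	/* register first... */
	check_events(0x0605);	/* ...so an instant completion still counts */
	printf("state: %d\n", wait_for_event(&task));	/* 1: AQ_COMPLETED */
	return 0;
}
```
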
1434 * @pf: ptr to struct ice_pf
1437 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) in __ice_clean_ctrlq() argument
1439 struct device *dev = ice_pf_to_dev(pf); in __ice_clean_ctrlq()
1441 struct ice_hw *hw = &pf->hw; in __ice_clean_ctrlq()
1447 /* Do not clean control queue if/when PF reset fails */ in __ice_clean_ctrlq()
1448 if (test_bit(ICE_RESET_FAILED, pf->state)) in __ice_clean_ctrlq()
1538 ice_aq_check_events(pf, opcode, &event); in __ice_clean_ctrlq()
1542 if (ice_handle_link_event(pf, &event)) in __ice_clean_ctrlq()
1546 ice_vf_lan_overflow_event(pf, &event); in __ice_clean_ctrlq()
1549 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) { in __ice_clean_ctrlq()
1550 ice_vc_process_vf_msg(pf, &event, NULL); in __ice_clean_ctrlq()
1561 ice_vc_process_vf_msg(pf, &event, &data); in __ice_clean_ctrlq()
1565 ice_get_fwlog_data(pf, &event); in __ice_clean_ctrlq()
1568 ice_dcb_process_lldp_set_mib_change(pf, &event); in __ice_clean_ctrlq()
1571 ice_process_health_status_event(pf, &event); in __ice_clean_ctrlq()
1602 * @pf: board private structure
1604 static void ice_clean_adminq_subtask(struct ice_pf *pf) in ice_clean_adminq_subtask() argument
1606 struct ice_hw *hw = &pf->hw; in ice_clean_adminq_subtask()
1608 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_clean_adminq_subtask()
1611 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) in ice_clean_adminq_subtask()
1614 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_clean_adminq_subtask()
1622 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); in ice_clean_adminq_subtask()
1629 * @pf: board private structure
1631 static void ice_clean_mailboxq_subtask(struct ice_pf *pf) in ice_clean_mailboxq_subtask() argument
1633 struct ice_hw *hw = &pf->hw; in ice_clean_mailboxq_subtask()
1635 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) in ice_clean_mailboxq_subtask()
1638 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) in ice_clean_mailboxq_subtask()
1641 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_clean_mailboxq_subtask()
1644 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); in ice_clean_mailboxq_subtask()
1651 * @pf: board private structure
1653 static void ice_clean_sbq_subtask(struct ice_pf *pf) in ice_clean_sbq_subtask() argument
1655 struct ice_hw *hw = &pf->hw; in ice_clean_sbq_subtask()
1661 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1665 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) in ice_clean_sbq_subtask()
1668 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) in ice_clean_sbq_subtask()
1671 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1674 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); in ice_clean_sbq_subtask()
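
All three queue-cleaning subtasks share the same shape: bail if the pending bit is unset, drain, clear the bit, then drain once more so an event that raced in between the drain and the clear is not stranded until the next interrupt. A sketch of that ordering (C11 atomics stand in for the state bits; drain() is a placeholder for __ice_clean_ctrlq() and always reports the queue empty in this sketch):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool event_pending;
static atomic_int queued;		/* messages sitting on the queue */

static int drain(void)			/* nonzero would mean "still busy" */
{
	int n = atomic_exchange(&queued, 0);

	while (n--)
		puts("processed one message");
	return 0;
}

static void clean_subtask(void)
{
	if (!atomic_load(&event_pending))
		return;

	if (drain())
		return;			/* still busy: leave the flag set */

	atomic_store(&event_pending, false);

	/* an IRQ may have queued work after drain() returned but before
	 * the flag was cleared; one more pass closes that window */
	drain();
}

int main(void)
{
	atomic_store(&queued, 2);
	atomic_store(&event_pending, true);
	clean_subtask();
	return 0;
}
```
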
1681 * @pf: board private structure
1685 void ice_service_task_schedule(struct ice_pf *pf) in ice_service_task_schedule() argument
1687 if (!test_bit(ICE_SERVICE_DIS, pf->state) && in ice_service_task_schedule()
1688 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && in ice_service_task_schedule()
1689 !test_bit(ICE_NEEDS_RESTART, pf->state)) in ice_service_task_schedule()
1690 queue_work(ice_wq, &pf->serv_task); in ice_service_task_schedule()
1695 * @pf: board private structure
1697 static void ice_service_task_complete(struct ice_pf *pf) in ice_service_task_complete() argument
1699 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); in ice_service_task_complete()
1701 /* force memory (pf->state) to sync before next service task */ in ice_service_task_complete()
1703 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_complete()
1708 * @pf: board private structure
1713 static int ice_service_task_stop(struct ice_pf *pf) in ice_service_task_stop() argument
1717 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_stop()
1719 if (pf->serv_tmr.function) in ice_service_task_stop()
1720 timer_delete_sync(&pf->serv_tmr); in ice_service_task_stop()
1721 if (pf->serv_task.func) in ice_service_task_stop()
1722 cancel_work_sync(&pf->serv_task); in ice_service_task_stop()
1724 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_stop()
1730 * @pf: board private structure
1734 static void ice_service_task_restart(struct ice_pf *pf) in ice_service_task_restart() argument
1736 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_restart()
1737 ice_service_task_schedule(pf); in ice_service_task_restart()
1746 struct ice_pf *pf = from_timer(pf, t, serv_tmr); in ice_service_timer() local
1748 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); in ice_service_timer()
1749 ice_service_task_schedule(pf); in ice_service_timer()
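
ice_service_task_schedule() uses test_and_set_bit on ICE_SERVICE_SCHED so any number of callers can request the task while only one instance gets queued; ice_service_task_complete() clears the bit (after a barrier, per the comment above) to allow the next queuing. A userspace sketch of the guard (atomics and a counter stand in for the state bits and queue_work()):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool service_disabled;
static atomic_bool service_scheduled;
static int queued_count;

static void service_task_schedule(void)
{
	if (!atomic_load(&service_disabled) &&
	    !atomic_exchange(&service_scheduled, true))
		queued_count++;		/* kernel: queue_work(ice_wq, ...) */
}

static void service_task_complete(void)
{
	/* the driver puts a memory barrier here so state written by the
	 * task is visible before a re-queue becomes possible */
	atomic_store(&service_scheduled, false);
}

int main(void)
{
	service_task_schedule();
	service_task_schedule();	/* coalesced: already scheduled */
	printf("queued %d time(s)\n", queued_count);	/* 1 */

	service_task_complete();
	service_task_schedule();	/* allowed again */
	printf("queued %d time(s)\n", queued_count);	/* 2 */
	return 0;
}
```
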
1754 * @pf: pointer to the PF structure
1759 * Since the queue can get stuck on VF MDD events, the PF can be configured to
1763 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf, in ice_mdd_maybe_reset_vf() argument
1766 struct device *dev = ice_pf_to_dev(pf); in ice_mdd_maybe_reset_vf()
1768 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) in ice_mdd_maybe_reset_vf()
1780 dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n", in ice_mdd_maybe_reset_vf()
1781 pf->hw.pf_id, vf->vf_id); in ice_mdd_maybe_reset_vf()
1787 * @pf: pointer to the PF structure
1790 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1792 * disable the queue, the PF can be configured to reset the VF using ethtool
1795 static void ice_handle_mdd_event(struct ice_pf *pf) in ice_handle_mdd_event() argument
1797 struct device *dev = ice_pf_to_dev(pf); in ice_handle_mdd_event()
1798 struct ice_hw *hw = &pf->hw; in ice_handle_mdd_event()
1803 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { in ice_handle_mdd_event()
1807 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
1819 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1820 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", in ice_handle_mdd_event()
1822 ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num, in ice_handle_mdd_event()
1834 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1835 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", in ice_handle_mdd_event()
1837 ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num, in ice_handle_mdd_event()
1849 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1850 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", in ice_handle_mdd_event()
1852 ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event, in ice_handle_mdd_event()
1857 /* check to see if this PF caused an MDD event */ in ice_handle_mdd_event()
1861 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1862 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); in ice_handle_mdd_event()
1868 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1869 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); in ice_handle_mdd_event()
1875 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1876 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); in ice_handle_mdd_event()
1882 mutex_lock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1883 ice_for_each_vf(pf, bkt, vf) { in ice_handle_mdd_event()
1890 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1891 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1902 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1903 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1914 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1915 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1926 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1927 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1935 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, in ice_handle_mdd_event()
1938 mutex_unlock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1940 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
2028 struct ice_pf *pf = pi->hw->back; in ice_init_nvm_phy_type() local
2039 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_nvm_phy_type()
2043 pf->nvm_phy_type_hi = pcaps->phy_type_high; in ice_init_nvm_phy_type()
2044 pf->nvm_phy_type_lo = pcaps->phy_type_low; in ice_init_nvm_phy_type()
2060 struct ice_pf *pf = pi->hw->back; in ice_init_link_dflt_override() local
2062 ldo = &pf->link_dflt_override; in ice_init_link_dflt_override()
2072 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in ice_init_link_dflt_override()
2073 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in ice_init_link_dflt_override()
2098 struct ice_pf *pf = pi->hw->back; in ice_init_phy_cfg_dflt_override() local
2100 ldo = &pf->link_dflt_override; in ice_init_phy_cfg_dflt_override()
2108 cfg->phy_type_low = pf->nvm_phy_type_lo & in ice_init_phy_cfg_dflt_override()
2110 cfg->phy_type_high = pf->nvm_phy_type_hi & in ice_init_phy_cfg_dflt_override()
2116 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); in ice_init_phy_cfg_dflt_override()
2137 struct ice_pf *pf = pi->hw->back; in ice_init_phy_user_cfg() local
2154 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_phy_user_cfg()
2164 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); in ice_init_phy_user_cfg()
2171 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { in ice_init_phy_user_cfg()
2186 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); in ice_init_phy_user_cfg()
2207 struct ice_pf *pf = vsi->back; in ice_configure_phy() local
2216 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && in ice_configure_phy()
2220 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) in ice_configure_phy()
2306 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); in ice_configure_phy()
2319 * @pf: pointer to PF struct
2324 static void ice_check_media_subtask(struct ice_pf *pf) in ice_check_media_subtask() argument
2331 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) in ice_check_media_subtask()
2334 vsi = ice_get_main_vsi(pf); in ice_check_media_subtask()
2344 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_check_media_subtask()
2347 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) in ice_check_media_subtask()
2359 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_check_media_subtask()
2369 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); in ice_service_task_recovery_mode() local
2371 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_service_task_recovery_mode()
2372 ice_clean_adminq_subtask(pf); in ice_service_task_recovery_mode()
2374 ice_service_task_complete(pf); in ice_service_task_recovery_mode()
2376 mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); in ice_service_task_recovery_mode()
2381 * @work: pointer to work_struct contained by the PF struct
2385 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); in ice_service_task() local
2388 if (pf->health_reporters.tx_hang_buf.tx_ring) { in ice_service_task()
2389 ice_report_tx_hang(pf); in ice_service_task()
2390 pf->health_reporters.tx_hang_buf.tx_ring = NULL; in ice_service_task()
2393 ice_reset_subtask(pf); in ice_service_task()
2396 if (ice_is_reset_in_progress(pf->state) || in ice_service_task()
2397 test_bit(ICE_SUSPENDED, pf->state) || in ice_service_task()
2398 test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_service_task()
2399 ice_service_task_complete(pf); in ice_service_task()
2403 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { in ice_service_task()
2410 swap(event->reg, pf->oicr_err_reg); in ice_service_task()
2411 ice_send_event_to_aux(pf, event); in ice_service_task()
2419 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) in ice_service_task()
2420 ice_unplug_aux_dev(pf); in ice_service_task()
2423 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) in ice_service_task()
2424 ice_plug_aux_dev(pf); in ice_service_task()
2426 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { in ice_service_task()
2432 ice_send_event_to_aux(pf, event); in ice_service_task()
2437 ice_clean_adminq_subtask(pf); in ice_service_task()
2438 ice_check_media_subtask(pf); in ice_service_task()
2439 ice_check_for_hang_subtask(pf); in ice_service_task()
2440 ice_sync_fltr_subtask(pf); in ice_service_task()
2441 ice_handle_mdd_event(pf); in ice_service_task()
2442 ice_watchdog_subtask(pf); in ice_service_task()
2444 if (ice_is_safe_mode(pf)) { in ice_service_task()
2445 ice_service_task_complete(pf); in ice_service_task()
2449 ice_process_vflr_event(pf); in ice_service_task()
2450 ice_clean_mailboxq_subtask(pf); in ice_service_task()
2451 ice_clean_sbq_subtask(pf); in ice_service_task()
2452 ice_sync_arfs_fltrs(pf); in ice_service_task()
2453 ice_flush_fdir_ctx(pf); in ice_service_task()
2456 ice_service_task_complete(pf); in ice_service_task()
2462 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || in ice_service_task()
2463 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || in ice_service_task()
2464 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_service_task()
2465 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || in ice_service_task()
2466 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || in ice_service_task()
2467 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || in ice_service_task()
2468 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_service_task()
2469 mod_timer(&pf->serv_tmr, jiffies); in ice_service_task()
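
The tail of ice_service_task() decides whether to rearm the timer immediately: if the pass overran serv_tmr_period or any of the listed event bits is still pending, mod_timer(..., jiffies) fires the next pass right away. A sketch of that decision (a bool array stands in for the pf->state bits; the millisecond return value is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

enum { EV_MDD, EV_VFLR, EV_MAILBOXQ, EV_ADMINQ, EV_COUNT };

static bool pending[EV_COUNT];

static unsigned int next_fire_ms(bool overran)
{
	if (overran)
		return 0;		/* kernel: mod_timer(..., jiffies) */

	for (int i = 0; i < EV_COUNT; i++)
		if (pending[i])
			return 0;	/* more work already queued */

	return 1000;			/* otherwise wait a full period */
}

int main(void)
{
	pending[EV_ADMINQ] = true;
	printf("next fire in %u ms\n", next_fire_ms(false));	/* 0 */
	return 0;
}
```
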
2494 * @pf: board private structure
2497 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) in ice_schedule_reset() argument
2499 struct device *dev = ice_pf_to_dev(pf); in ice_schedule_reset()
2502 if (test_bit(ICE_RESET_FAILED, pf->state)) { in ice_schedule_reset()
2507 if (ice_is_reset_in_progress(pf->state)) { in ice_schedule_reset()
2514 set_bit(ICE_PFR_REQ, pf->state); in ice_schedule_reset()
2517 set_bit(ICE_CORER_REQ, pf->state); in ice_schedule_reset()
2520 set_bit(ICE_GLOBR_REQ, pf->state); in ice_schedule_reset()
2526 ice_service_task_schedule(pf); in ice_schedule_reset()
2554 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix() local
2561 dev = ice_pf_to_dev(pf); in ice_vsi_req_irq_msix()
2755 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings() local
2757 .qs_mutex = &pf->avail_q_mutex, in ice_prepare_xdp_rings()
2758 .pf_map = pf->avail_txqs, in ice_prepare_xdp_rings()
2759 .pf_map_size = pf->max_pf_txqs, in ice_prepare_xdp_rings()
2769 dev = ice_pf_to_dev(pf); in ice_prepare_xdp_rings()
2830 mutex_lock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2832 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2835 mutex_unlock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2846 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2852 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings() local
2874 mutex_lock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2876 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2879 mutex_unlock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2893 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
3073 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI"); in ice_xdp()
3096 * @pf: board private structure
3098 static void ice_ena_misc_vector(struct ice_pf *pf) in ice_ena_misc_vector() argument
3100 struct ice_hw *hw = &pf->hw; in ice_ena_misc_vector()
3128 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), in ice_ena_misc_vector()
3131 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_ena_misc_vector()
3134 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ena_misc_vector()
3145 struct ice_pf *pf = data; in ice_ll_ts_intr() local
3153 hw = &pf->hw; in ice_ll_ts_intr()
3154 tx = &pf->ptp.port.tx; in ice_ll_ts_intr()
3167 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ll_ts_intr()
3180 struct ice_pf *pf = (struct ice_pf *)data; in ice_misc_intr() local
3182 struct ice_hw *hw = &pf->hw; in ice_misc_intr()
3186 dev = ice_pf_to_dev(pf); in ice_misc_intr()
3187 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3188 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3189 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3196 pf->sw_int_count++; in ice_misc_intr()
3201 set_bit(ICE_MDD_EVENT_PENDING, pf->state); in ice_misc_intr()
3205 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { in ice_misc_intr()
3212 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); in ice_misc_intr()
3225 pf->corer_count++; in ice_misc_intr()
3227 pf->globr_count++; in ice_misc_intr()
3229 pf->empr_count++; in ice_misc_intr()
3234 * pf->state so that the service task can start a reset/rebuild. in ice_misc_intr()
3236 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_misc_intr()
3238 set_bit(ICE_CORER_RECV, pf->state); in ice_misc_intr()
3240 set_bit(ICE_GLOBR_RECV, pf->state); in ice_misc_intr()
3242 set_bit(ICE_EMPR_RECV, pf->state); in ice_misc_intr()
3250 * ICE_RESET_OICR_RECV in pf->state indicates in ice_misc_intr()
3264 ret = ice_ptp_ts_irq(pf); in ice_misc_intr()
3273 if (ice_pf_src_tmr_owned(pf)) { in ice_misc_intr()
3275 pf->ptp.ext_ts_irq |= gltsyn_stat & in ice_misc_intr()
3280 ice_ptp_extts_event(pf); in ice_misc_intr()
3286 pf->oicr_err_reg |= oicr; in ice_misc_intr()
3287 set_bit(ICE_AUX_ERR_PENDING, pf->state); in ice_misc_intr()
3300 set_bit(ICE_PFR_REQ, pf->state); in ice_misc_intr()
3303 ice_service_task_schedule(pf); in ice_misc_intr()
3317 struct ice_pf *pf = data; in ice_misc_intr_thread_fn() local
3320 hw = &pf->hw; in ice_misc_intr_thread_fn()
3322 if (ice_is_reset_in_progress(pf->state)) in ice_misc_intr_thread_fn()
3325 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { in ice_misc_intr_thread_fn()
3329 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { in ice_misc_intr_thread_fn()
3367 * @pf: board private structure
3369 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) in ice_free_irq_msix_ll_ts() argument
3371 int irq_num = pf->ll_ts_irq.virq; in ice_free_irq_msix_ll_ts()
3374 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); in ice_free_irq_msix_ll_ts()
3376 ice_free_irq(pf, pf->ll_ts_irq); in ice_free_irq_msix_ll_ts()
3381 * @pf: board private structure
3383 static void ice_free_irq_msix_misc(struct ice_pf *pf) in ice_free_irq_msix_misc() argument
3385 int misc_irq_num = pf->oicr_irq.virq; in ice_free_irq_msix_misc()
3386 struct ice_hw *hw = &pf->hw; in ice_free_irq_msix_misc()
3395 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); in ice_free_irq_msix_misc()
3397 ice_free_irq(pf, pf->oicr_irq); in ice_free_irq_msix_misc()
3398 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_free_irq_msix_misc()
3399 ice_free_irq_msix_ll_ts(pf); in ice_free_irq_msix_misc()
3437 * @pf: board private structure
3443 static int ice_req_irq_msix_misc(struct ice_pf *pf) in ice_req_irq_msix_misc() argument
3445 struct device *dev = ice_pf_to_dev(pf); in ice_req_irq_msix_misc()
3446 struct ice_hw *hw = &pf->hw; in ice_req_irq_msix_misc()
3451 if (!pf->int_name[0]) in ice_req_irq_msix_misc()
3452 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", in ice_req_irq_msix_misc()
3455 if (!pf->int_name_ll_ts[0]) in ice_req_irq_msix_misc()
3456 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, in ice_req_irq_msix_misc()
3462 if (ice_is_reset_in_progress(pf->state)) in ice_req_irq_msix_misc()
3466 irq = ice_alloc_irq(pf, false); in ice_req_irq_msix_misc()
3470 pf->oicr_irq = irq; in ice_req_irq_msix_misc()
3471 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, in ice_req_irq_msix_misc()
3473 pf->int_name, pf); in ice_req_irq_msix_misc()
3476 pf->int_name, err); in ice_req_irq_msix_misc()
3477 ice_free_irq(pf, pf->oicr_irq); in ice_req_irq_msix_misc()
3482 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3485 irq = ice_alloc_irq(pf, false); in ice_req_irq_msix_misc()
3489 pf->ll_ts_irq = irq; in ice_req_irq_msix_misc()
3490 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, in ice_req_irq_msix_misc()
3491 pf->int_name_ll_ts, pf); in ice_req_irq_msix_misc()
3494 pf->int_name_ll_ts, err); in ice_req_irq_msix_misc()
3495 ice_free_irq(pf, pf->ll_ts_irq); in ice_req_irq_msix_misc()
3500 ice_ena_misc_vector(pf); in ice_req_irq_msix_misc()
3502 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); in ice_req_irq_msix_misc()
3505 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3507 ((pf->ll_ts_irq.index + pf_intr_start_offset) & in ice_req_irq_msix_misc()
3509 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), in ice_req_irq_msix_misc()
3525 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_ops() local
3527 if (ice_is_safe_mode(pf)) { in ice_set_ops()
3534 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; in ice_set_ops()
3553 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_netdev_features() local
3554 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); in ice_set_netdev_features()
3560 if (ice_is_safe_mode(pf)) { in ice_set_netdev_features()
3640 if (ice_is_feature_supported(pf, ICE_F_GCS)) in ice_set_netdev_features()
3661 * ice_pf_vsi_setup - Set up a PF VSI
3662 * @pf: board private structure
3669 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_pf_vsi_setup() argument
3677 return ice_vsi_setup(pf, ¶ms); in ice_pf_vsi_setup()
3681 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, in ice_chnl_vsi_setup() argument
3691 return ice_vsi_setup(pf, ¶ms); in ice_chnl_vsi_setup()
3696 * @pf: board private structure
3703 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_ctrl_vsi_setup() argument
3711 return ice_vsi_setup(pf, ¶ms); in ice_ctrl_vsi_setup()
3716 * @pf: board private structure
3723 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_lb_vsi_setup() argument
3731 return ice_vsi_setup(pf, ¶ms); in ice_lb_vsi_setup()
3929 * @pf: pointer to an ice_pf instance
3931 u16 ice_get_avail_txq_count(struct ice_pf *pf) in ice_get_avail_txq_count() argument
3933 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, in ice_get_avail_txq_count()
3934 pf->max_pf_txqs); in ice_get_avail_txq_count()
3939 * @pf: pointer to an ice_pf instance
3941 u16 ice_get_avail_rxq_count(struct ice_pf *pf) in ice_get_avail_rxq_count() argument
3943 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, in ice_get_avail_rxq_count()
3944 pf->max_pf_rxqs); in ice_get_avail_rxq_count()
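
Both counters delegate to a helper that walks a mutex-guarded bitmap; given that the XDP teardown above releases queues by clearing bits in pf->avail_txqs, a set bit appears to mark a queue in use, so the available count is the number of clear bits. A userspace sketch under that reading (a 64-bit word and MAX_QUEUES are invented stand-ins for the kernel bitmap helpers and pf->max_pf_txqs):

```c
#include <pthread.h>
#include <stdio.h>

#define MAX_QUEUES 64

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long q_map;	/* bit set => queue in use */

static unsigned int avail_q_count(void)
{
	unsigned int count = 0;

	pthread_mutex_lock(&q_lock);
	for (int q = 0; q < MAX_QUEUES; q++)
		if (!(q_map & (1ULL << q)))
			count++;	/* kernel: for_each_clear_bit() */
	pthread_mutex_unlock(&q_lock);

	return count;
}

int main(void)
{
	pthread_mutex_lock(&q_lock);
	q_map |= 1ULL << 0;		/* claim two queues */
	q_map |= 1ULL << 5;
	pthread_mutex_unlock(&q_lock);

	printf("available queues: %u\n", avail_q_count());	/* 62 */
	return 0;
}
```
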
3949 * @pf: board private structure to initialize
3951 static void ice_deinit_pf(struct ice_pf *pf) in ice_deinit_pf() argument
3953 ice_service_task_stop(pf); in ice_deinit_pf()
3954 mutex_destroy(&pf->lag_mutex); in ice_deinit_pf()
3955 mutex_destroy(&pf->adev_mutex); in ice_deinit_pf()
3956 mutex_destroy(&pf->sw_mutex); in ice_deinit_pf()
3957 mutex_destroy(&pf->tc_mutex); in ice_deinit_pf()
3958 mutex_destroy(&pf->avail_q_mutex); in ice_deinit_pf()
3959 mutex_destroy(&pf->vfs.table_lock); in ice_deinit_pf()
3961 if (pf->avail_txqs) { in ice_deinit_pf()
3962 bitmap_free(pf->avail_txqs); in ice_deinit_pf()
3963 pf->avail_txqs = NULL; in ice_deinit_pf()
3966 if (pf->avail_rxqs) { in ice_deinit_pf()
3967 bitmap_free(pf->avail_rxqs); in ice_deinit_pf()
3968 pf->avail_rxqs = NULL; in ice_deinit_pf()
3971 if (pf->ptp.clock) in ice_deinit_pf()
3972 ptp_clock_unregister(pf->ptp.clock); in ice_deinit_pf()
3974 xa_destroy(&pf->dyn_ports); in ice_deinit_pf()
3975 xa_destroy(&pf->sf_nums); in ice_deinit_pf()
3980 * @pf: pointer to the PF instance
3982 static void ice_set_pf_caps(struct ice_pf *pf) in ice_set_pf_caps() argument
3984 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; in ice_set_pf_caps()
3986 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3988 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3989 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3991 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3992 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
3994 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
3995 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, in ice_set_pf_caps()
3998 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4000 set_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4002 clear_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4009 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_set_pf_caps()
4010 set_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4011 /* force guaranteed filter pool for PF */ in ice_set_pf_caps()
4012 ice_alloc_fd_guar_item(&pf->hw, &unused, in ice_set_pf_caps()
4014 /* force shared filter pool for PF */ in ice_set_pf_caps()
4015 ice_alloc_fd_shrd_item(&pf->hw, &unused, in ice_set_pf_caps()
4019 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4021 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4023 pf->max_pf_txqs = func_caps->common_cap.num_txq; in ice_set_pf_caps()
4024 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; in ice_set_pf_caps()
4029 * @pf: board private structure to initialize
4031 static int ice_init_pf(struct ice_pf *pf) in ice_init_pf() argument
4033 ice_set_pf_caps(pf); in ice_init_pf()
4035 mutex_init(&pf->sw_mutex); in ice_init_pf()
4036 mutex_init(&pf->tc_mutex); in ice_init_pf()
4037 mutex_init(&pf->adev_mutex); in ice_init_pf()
4038 mutex_init(&pf->lag_mutex); in ice_init_pf()
4040 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_init_pf()
4041 spin_lock_init(&pf->aq_wait_lock); in ice_init_pf()
4042 init_waitqueue_head(&pf->aq_wait_queue); in ice_init_pf()
4044 init_waitqueue_head(&pf->reset_wait_queue); in ice_init_pf()
4047 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_init_pf()
4048 pf->serv_tmr_period = HZ; in ice_init_pf()
4049 INIT_WORK(&pf->serv_task, ice_service_task); in ice_init_pf()
4050 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_init_pf()
4052 mutex_init(&pf->avail_q_mutex); in ice_init_pf()
4053 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
4054 if (!pf->avail_txqs) in ice_init_pf()
4057 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); in ice_init_pf()
4058 if (!pf->avail_rxqs) { in ice_init_pf()
4059 bitmap_free(pf->avail_txqs); in ice_init_pf()
4060 pf->avail_txqs = NULL; in ice_init_pf()
4064 mutex_init(&pf->vfs.table_lock); in ice_init_pf()
4065 hash_init(pf->vfs.table); in ice_init_pf()
4066 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) in ice_init_pf()
4067 wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, in ice_init_pf()
4070 ice_mbx_init_snapshot(&pf->hw); in ice_init_pf()
4072 xa_init(&pf->dyn_ports); in ice_init_pf()
4073 xa_init(&pf->sf_nums); in ice_init_pf()
4090 * word) indicates WoL is not supported on the corresponding PF ID. in ice_is_wol_supported()
4111 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs() local
4117 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_vsi_recfg_qs()
4134 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); in ice_vsi_recfg_qs()
4150 ice_pf_dcb_recfg(pf, locked); in ice_vsi_recfg_qs()
4155 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", in ice_vsi_recfg_qs()
4158 clear_bit(ICE_CFG_BUSY, pf->state); in ice_vsi_recfg_qs()
4163 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4164 * @pf: PF to configure
4166 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4169 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) in ice_set_safe_mode_vlan_cfg() argument
4171 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_set_safe_mode_vlan_cfg()
4183 hw = &pf->hw; in ice_set_safe_mode_vlan_cfg()
4222 struct ice_pf *pf = hw->back; in ice_log_pkg_init() local
4225 dev = ice_pf_to_dev(pf); in ice_log_pkg_init()
4299 * @pf: pointer to the PF instance
4305 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) in ice_load_pkg() argument
4308 struct device *dev = ice_pf_to_dev(pf); in ice_load_pkg()
4309 struct ice_hw *hw = &pf->hw; in ice_load_pkg()
4326 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4333 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4338 * @pf: pointer to the PF structure
4344 static void ice_verify_cacheline_size(struct ice_pf *pf) in ice_verify_cacheline_size() argument
4346 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) in ice_verify_cacheline_size()
4347 …dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts… in ice_verify_cacheline_size()
4353 * @pf: PF struct
4357 static int ice_send_version(struct ice_pf *pf) in ice_send_version() argument
4367 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); in ice_send_version()
4372 * @pf: pointer to the PF instance
4376 static int ice_init_fdir(struct ice_pf *pf) in ice_init_fdir() argument
4378 struct device *dev = ice_pf_to_dev(pf); in ice_init_fdir()
4383 * Allocate it and store it in the PF. in ice_init_fdir()
4385 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); in ice_init_fdir()
4397 mutex_init(&pf->hw.fdir_fltr_lock); in ice_init_fdir()
4399 err = ice_fdir_create_dflt_rules(pf); in ice_init_fdir()
4406 ice_fdir_release_flows(&pf->hw); in ice_init_fdir()
4410 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_init_fdir()
4411 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4412 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_init_fdir()
4417 static void ice_deinit_fdir(struct ice_pf *pf) in ice_deinit_fdir() argument
4419 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); in ice_deinit_fdir()
4426 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_deinit_fdir()
4427 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_deinit_fdir()
4428 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_deinit_fdir()
4431 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); in ice_deinit_fdir()
4436 * @pf: pointer to the PF instance
4438 static char *ice_get_opt_fw_name(struct ice_pf *pf) in ice_get_opt_fw_name() argument
4443 struct pci_dev *pdev = pf->pdev; in ice_get_opt_fw_name()
4466 * @pf: pointer to the PF instance
4471 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) in ice_request_fw() argument
4473 char *opt_fw_filename = ice_get_opt_fw_name(pf); in ice_request_fw()
4474 struct device *dev = ice_pf_to_dev(pf); in ice_request_fw()
4505 struct ice_pf *pf = hw->back; in ice_init_tx_topology() local
4509 dev = ice_pf_to_dev(pf); in ice_init_tx_topology()
4533 * @pf: pointer to pf structure
4535 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4536 * formats the PF hardware supports. The exact list of supported RXDIDs
4544 static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf) in ice_init_supported_rxdids() argument
4546 pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1); in ice_init_supported_rxdids()
4554 pf->supported_rxdids |= BIT(i); in ice_init_supported_rxdids()
4561 * @pf: pointer to pf structure
4568 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) in ice_init_ddp_config() argument
4570 struct device *dev = ice_pf_to_dev(pf); in ice_init_ddp_config()
4574 err = ice_request_fw(pf, &firmware); in ice_init_ddp_config()
4589 ice_load_pkg(firmware, pf); in ice_init_ddp_config()
4593 ice_init_supported_rxdids(hw, pf); in ice_init_ddp_config()
4600 * @pf: pointer to the PF struct
4602 static void ice_print_wake_reason(struct ice_pf *pf) in ice_print_wake_reason() argument
4604 u32 wus = pf->wakeup_reason; in ice_print_wake_reason()
4622 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); in ice_print_wake_reason()
4627 * @pf: pointer to the PF struct
4631 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) in ice_pf_fwlog_update_module() argument
4633 struct ice_hw *hw = &pf->hw; in ice_pf_fwlog_update_module()
4717 int ice_init_dev(struct ice_pf *pf) in ice_init_dev() argument
4719 struct device *dev = ice_pf_to_dev(pf); in ice_init_dev()
4720 struct ice_hw *hw = &pf->hw; in ice_init_dev()
4723 ice_init_feature_support(pf); in ice_init_dev()
4725 err = ice_init_ddp_config(hw, pf); in ice_init_dev()
4728 * set in pf->state, which will cause ice_is_safe_mode to return in ice_init_dev()
4731 if (err || ice_is_safe_mode(pf)) { in ice_init_dev()
4740 err = ice_init_pf(pf); in ice_init_dev()
4746 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; in ice_init_dev()
4747 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; in ice_init_dev()
4748 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in ice_init_dev()
4749 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; in ice_init_dev()
4750 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { in ice_init_dev()
4751 pf->hw.udp_tunnel_nic.tables[0].n_entries = in ice_init_dev()
4752 pf->hw.tnl.valid_count[TNL_VXLAN]; in ice_init_dev()
4753 pf->hw.udp_tunnel_nic.tables[0].tunnel_types = in ice_init_dev()
4756 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { in ice_init_dev()
4757 pf->hw.udp_tunnel_nic.tables[1].n_entries = in ice_init_dev()
4758 pf->hw.tnl.valid_count[TNL_GENEVE]; in ice_init_dev()
4759 pf->hw.udp_tunnel_nic.tables[1].tunnel_types = in ice_init_dev()
4763 err = ice_init_interrupt_scheme(pf); in ice_init_dev()
4775 err = ice_req_irq_msix_misc(pf); in ice_init_dev()
4784 ice_clear_interrupt_scheme(pf); in ice_init_dev()
4786 ice_deinit_pf(pf); in ice_init_dev()
4790 void ice_deinit_dev(struct ice_pf *pf) in ice_deinit_dev() argument
4792 ice_free_irq_msix_misc(pf); in ice_deinit_dev()
4793 ice_deinit_pf(pf); in ice_deinit_dev()
4794 ice_deinit_hw(&pf->hw); in ice_deinit_dev()
4797 ice_reset(&pf->hw, ICE_RESET_PFR); in ice_deinit_dev()
4798 pci_wait_for_pending_transaction(pf->pdev); in ice_deinit_dev()
4799 ice_clear_interrupt_scheme(pf); in ice_deinit_dev()
4802 static void ice_init_features(struct ice_pf *pf) in ice_init_features() argument
4804 struct device *dev = ice_pf_to_dev(pf); in ice_init_features()
4806 if (ice_is_safe_mode(pf)) in ice_init_features()
4810 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_init_features()
4811 ice_ptp_init(pf); in ice_init_features()
4813 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_init_features()
4814 ice_gnss_init(pf); in ice_init_features()
4816 if (ice_is_feature_supported(pf, ICE_F_CGU) || in ice_init_features()
4817 ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) in ice_init_features()
4818 ice_dpll_init(pf); in ice_init_features()
4821 if (ice_init_fdir(pf)) in ice_init_features()
4825 if (ice_init_pf_dcb(pf, false)) { in ice_init_features()
4826 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_init_features()
4827 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); in ice_init_features()
4829 ice_cfg_lldp_mib_change(&pf->hw, true); in ice_init_features()
4832 if (ice_init_lag(pf)) in ice_init_features()
4835 ice_hwmon_init(pf); in ice_init_features()
4838 static void ice_deinit_features(struct ice_pf *pf) in ice_deinit_features() argument
4840 if (ice_is_safe_mode(pf)) in ice_deinit_features()
4843 ice_deinit_lag(pf); in ice_deinit_features()
4844 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) in ice_deinit_features()
4845 ice_cfg_lldp_mib_change(&pf->hw, false); in ice_deinit_features()
4846 ice_deinit_fdir(pf); in ice_deinit_features()
4847 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_deinit_features()
4848 ice_gnss_exit(pf); in ice_deinit_features()
4849 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_deinit_features()
4850 ice_ptp_release(pf); in ice_deinit_features()
4851 if (test_bit(ICE_FLAG_DPLL, pf->flags)) in ice_deinit_features()
4852 ice_dpll_deinit(pf); in ice_deinit_features()
4853 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) in ice_deinit_features()
4854 xa_destroy(&pf->eswitch.reprs); in ice_deinit_features()
4857 static void ice_init_wakeup(struct ice_pf *pf) in ice_init_wakeup() argument
4860 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); in ice_init_wakeup()
4863 ice_print_wake_reason(pf); in ice_init_wakeup()
4866 wr32(&pf->hw, PFPM_WUS, U32_MAX); in ice_init_wakeup()
4869 device_set_wakeup_enable(ice_pf_to_dev(pf), false); in ice_init_wakeup()
4872 static int ice_init_link(struct ice_pf *pf) in ice_init_link() argument
4874 struct device *dev = ice_pf_to_dev(pf); in ice_init_link()
4877 err = ice_init_link_events(pf->hw.port_info); in ice_init_link()
4884 err = ice_init_nvm_phy_type(pf->hw.port_info); in ice_init_link()
4889 err = ice_update_link_info(pf->hw.port_info); in ice_init_link()
4893 ice_init_link_dflt_override(pf->hw.port_info); in ice_init_link()
4895 ice_check_link_cfg_err(pf, in ice_init_link()
4896 pf->hw.port_info->phy.link_info.link_cfg_err); in ice_init_link()
4899 if (pf->hw.port_info->phy.link_info.link_info & in ice_init_link()
4902 err = ice_init_phy_user_cfg(pf->hw.port_info); in ice_init_link()
4906 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { in ice_init_link()
4907 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_init_link()
4913 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_init_link()
4919 static int ice_init_pf_sw(struct ice_pf *pf) in ice_init_pf_sw() argument
4921 bool dvm = ice_is_dvm_ena(&pf->hw); in ice_init_pf_sw()
4926 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); in ice_init_pf_sw()
4927 if (!pf->first_sw) in ice_init_pf_sw()
4930 if (pf->hw.evb_veb) in ice_init_pf_sw()
4931 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; in ice_init_pf_sw()
4933 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; in ice_init_pf_sw()
4935 pf->first_sw->pf = pf; in ice_init_pf_sw()
4938 pf->first_sw->sw_id = pf->hw.port_info->sw_id; in ice_init_pf_sw()
4940 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_init_pf_sw()
4944 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_init_pf_sw()
4954 kfree(pf->first_sw); in ice_init_pf_sw()
4958 static void ice_deinit_pf_sw(struct ice_pf *pf) in ice_deinit_pf_sw() argument
4960 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_deinit_pf_sw()
4966 kfree(pf->first_sw); in ice_deinit_pf_sw()
4969 static int ice_alloc_vsis(struct ice_pf *pf) in ice_alloc_vsis() argument
4971 struct device *dev = ice_pf_to_dev(pf); in ice_alloc_vsis()
4973 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; in ice_alloc_vsis()
4974 if (!pf->num_alloc_vsi) in ice_alloc_vsis()
4977 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in ice_alloc_vsis()
4980 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in ice_alloc_vsis()
4981 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in ice_alloc_vsis()
4984 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_alloc_vsis()
4986 if (!pf->vsi) in ice_alloc_vsis()
4989 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, in ice_alloc_vsis()
4990 sizeof(*pf->vsi_stats), GFP_KERNEL); in ice_alloc_vsis()
4991 if (!pf->vsi_stats) { in ice_alloc_vsis()
4992 devm_kfree(dev, pf->vsi); in ice_alloc_vsis()
4999 static void ice_dealloc_vsis(struct ice_pf *pf) in ice_dealloc_vsis() argument
5001 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); in ice_dealloc_vsis()
5002 pf->vsi_stats = NULL; in ice_dealloc_vsis()
5004 pf->num_alloc_vsi = 0; in ice_dealloc_vsis()
5005 devm_kfree(ice_pf_to_dev(pf), pf->vsi); in ice_dealloc_vsis()
5006 pf->vsi = NULL; in ice_dealloc_vsis()
5009 static int ice_init_devlink(struct ice_pf *pf) in ice_init_devlink() argument
5013 err = ice_devlink_register_params(pf); in ice_init_devlink()
5017 ice_devlink_init_regions(pf); in ice_init_devlink()
5018 ice_devlink_register(pf); in ice_init_devlink()
5019 ice_health_init(pf); in ice_init_devlink()
5024 static void ice_deinit_devlink(struct ice_pf *pf) in ice_deinit_devlink() argument
5026 ice_health_deinit(pf); in ice_deinit_devlink()
5027 ice_devlink_unregister(pf); in ice_deinit_devlink()
5028 ice_devlink_destroy_regions(pf); in ice_deinit_devlink()
5029 ice_devlink_unregister_params(pf); in ice_deinit_devlink()
5032 static int ice_init(struct ice_pf *pf) in ice_init() argument
5036 err = ice_init_dev(pf); in ice_init()
5040 if (pf->hw.mac_type == ICE_MAC_E830) { in ice_init()
5041 err = pci_enable_ptm(pf->pdev, NULL); in ice_init()
5043 dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); in ice_init()
5046 err = ice_alloc_vsis(pf); in ice_init()
5050 err = ice_init_pf_sw(pf); in ice_init()
5054 ice_init_wakeup(pf); in ice_init()
5056 err = ice_init_link(pf); in ice_init()
5060 err = ice_send_version(pf); in ice_init()
5064 ice_verify_cacheline_size(pf); in ice_init()
5066 if (ice_is_safe_mode(pf)) in ice_init()
5067 ice_set_safe_mode_vlan_cfg(pf); in ice_init()
5070 pcie_print_link_status(pf->pdev); in ice_init()
5073 clear_bit(ICE_DOWN, pf->state); in ice_init()
5074 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_init()
5077 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_init()
5082 ice_deinit_pf_sw(pf); in ice_init()
5084 ice_dealloc_vsis(pf); in ice_init()
5086 ice_deinit_dev(pf); in ice_init()
5090 static void ice_deinit(struct ice_pf *pf) in ice_deinit() argument
5092 set_bit(ICE_SERVICE_DIS, pf->state); in ice_deinit()
5093 set_bit(ICE_DOWN, pf->state); in ice_deinit()
5095 ice_deinit_pf_sw(pf); in ice_deinit()
5096 ice_dealloc_vsis(pf); in ice_deinit()
5097 ice_deinit_dev(pf); in ice_deinit()
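
The error labels glimpsed in ice_init() (ice_deinit_pf_sw, ice_dealloc_vsis, ice_deinit_dev on the failure path) follow the kernel's goto-unwind idiom: each failing step jumps to a label that tears down only what already succeeded, in reverse order. A standalone sketch of the idiom (the step functions are placeholders, not driver calls):

```c
#include <stdio.h>

static int init_dev(void)	{ puts("dev up");	return 0; }
static void deinit_dev(void)	{ puts("dev down"); }
static int alloc_vsis(void)	{ puts("vsis up");	return 0; }
static void free_vsis(void)	{ puts("vsis down"); }
static int init_sw(void)	{ puts("sw up");	return -1; } /* fails */

static int do_init(void)
{
	int err;

	err = init_dev();
	if (err)
		return err;

	err = alloc_vsis();
	if (err)
		goto err_alloc_vsis;

	err = init_sw();
	if (err)
		goto err_init_sw;

	return 0;

err_init_sw:
	free_vsis();		/* unwind in reverse order of init */
err_alloc_vsis:
	deinit_dev();
	return err;
}

int main(void)
{
	printf("do_init: %d\n", do_init());
	return 0;
}
```
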
5101 * ice_load - load pf by init hw and starting VSI
5102 * @pf: pointer to the pf instance
5106 int ice_load(struct ice_pf *pf) in ice_load() argument
5111 devl_assert_locked(priv_to_devlink(pf)); in ice_load()
5113 vsi = ice_get_main_vsi(pf); in ice_load()
5125 err = ice_init_mac_fltr(pf); in ice_load()
5129 err = ice_devlink_create_pf_port(pf); in ice_load()
5133 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in ice_load()
5145 ice_init_features(pf); in ice_load()
5147 err = ice_init_rdma(pf); in ice_load()
5151 ice_service_task_restart(pf); in ice_load()
5153 clear_bit(ICE_DOWN, pf->state); in ice_load()
5158 ice_deinit_features(pf); in ice_load()
5163 ice_devlink_destroy_pf_port(pf); in ice_load()
5171 * ice_unload - unload pf by stopping VSI and deinit hw
5172 * @pf: pointer to the pf instance
5176 void ice_unload(struct ice_pf *pf) in ice_unload() argument
5178 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_unload()
5180 devl_assert_locked(priv_to_devlink(pf)); in ice_unload()
5182 ice_deinit_rdma(pf); in ice_unload()
5183 ice_deinit_features(pf); in ice_unload()
5186 ice_devlink_destroy_pf_port(pf); in ice_unload()
5190 static int ice_probe_recovery_mode(struct ice_pf *pf) in ice_probe_recovery_mode() argument
5192 struct device *dev = ice_pf_to_dev(pf); in ice_probe_recovery_mode()
5197 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_probe_recovery_mode()
5198 spin_lock_init(&pf->aq_wait_lock); in ice_probe_recovery_mode()
5199 init_waitqueue_head(&pf->aq_wait_queue); in ice_probe_recovery_mode()
5201 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_probe_recovery_mode()
5202 pf->serv_tmr_period = HZ; in ice_probe_recovery_mode()
5203 INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); in ice_probe_recovery_mode()
5204 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_probe_recovery_mode()
5205 err = ice_create_all_ctrlq(&pf->hw); in ice_probe_recovery_mode()
5209 scoped_guard(devl, priv_to_devlink(pf)) { in ice_probe_recovery_mode()
5210 err = ice_init_devlink(pf); in ice_probe_recovery_mode()
5215 ice_service_task_restart(pf); in ice_probe_recovery_mode()
5232 struct ice_pf *pf; in ice_probe() local
5268 pf = ice_allocate_pf(dev); in ice_probe()
5269 if (!pf) in ice_probe()
5273 pf->aux_idx = -1; in ice_probe()
5283 pf->pdev = pdev; in ice_probe()
5284 pci_set_drvdata(pdev, pf); in ice_probe()
5285 set_bit(ICE_DOWN, pf->state); in ice_probe()
5287 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
5289 hw = &pf->hw; in ice_probe()
5293 hw->back = pf; in ice_probe()
5304 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); in ice_probe()
5312 return ice_probe_recovery_mode(pf); in ice_probe()
5325 pf->adapter = adapter; in ice_probe()
5327 err = ice_init(pf); in ice_probe()
5331 devl_lock(priv_to_devlink(pf)); in ice_probe()
5332 err = ice_load(pf); in ice_probe()
5336 err = ice_init_devlink(pf); in ice_probe()
5339 devl_unlock(priv_to_devlink(pf)); in ice_probe()
5344 ice_unload(pf); in ice_probe()
5346 devl_unlock(priv_to_devlink(pf)); in ice_probe()
5347 ice_deinit(pf); in ice_probe()
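
The probe tail above interleaves the devlink instance lock with load, registering devlink last so it never sees a half-initialized PF. A sketch of that ordering and its unwind (label names are illustrative):

	err = ice_init(pf);
	if (err)
		return err;

	devl_lock(priv_to_devlink(pf));
	err = ice_load(pf);
	if (err)
		goto err_load;

	err = ice_init_devlink(pf);
	if (err)
		goto err_init_devlink;
	devl_unlock(priv_to_devlink(pf));
	return 0;

err_init_devlink:
	ice_unload(pf);
err_load:
	devl_unlock(priv_to_devlink(pf));
	ice_deinit(pf);
	return err;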
5357 * @pf: pointer to the PF struct
5361 static void ice_set_wake(struct ice_pf *pf) in ice_set_wake() argument
5363 struct ice_hw *hw = &pf->hw; in ice_set_wake()
5364 bool wol = pf->wol_ena; in ice_set_wake()
5378 * @pf: pointer to the PF struct
5382 * wake, and that PF reset doesn't undo the LAA.
5384 static void ice_setup_mc_magic_wake(struct ice_pf *pf) in ice_setup_mc_magic_wake() argument
5386 struct device *dev = ice_pf_to_dev(pf); in ice_setup_mc_magic_wake()
5387 struct ice_hw *hw = &pf->hw; in ice_setup_mc_magic_wake()
5393 if (!pf->wol_ena) in ice_setup_mc_magic_wake()
5396 vsi = ice_get_main_vsi(pf); in ice_setup_mc_magic_wake()
5422 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_remove() local
5426 if (!ice_is_reset_in_progress(pf->state)) in ice_remove()
5431 if (ice_is_recovery_mode(&pf->hw)) { in ice_remove()
5432 ice_service_task_stop(pf); in ice_remove()
5433 scoped_guard(devl, priv_to_devlink(pf)) { in ice_remove()
5434 ice_deinit_devlink(pf); in ice_remove()
5439 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { in ice_remove()
5440 set_bit(ICE_VF_RESETS_DISABLED, pf->state); in ice_remove()
5441 ice_free_vfs(pf); in ice_remove()
5444 ice_hwmon_exit(pf); in ice_remove()
5446 ice_service_task_stop(pf); in ice_remove()
5447 ice_aq_cancel_waiting_tasks(pf); in ice_remove()
5448 set_bit(ICE_DOWN, pf->state); in ice_remove()
5450 if (!ice_is_safe_mode(pf)) in ice_remove()
5451 ice_remove_arfs(pf); in ice_remove()
5453 devl_lock(priv_to_devlink(pf)); in ice_remove()
5454 ice_dealloc_all_dynamic_ports(pf); in ice_remove()
5455 ice_deinit_devlink(pf); in ice_remove()
5457 ice_unload(pf); in ice_remove()
5458 devl_unlock(priv_to_devlink(pf)); in ice_remove()
5460 ice_deinit(pf); in ice_remove()
5461 ice_vsi_release_all(pf); in ice_remove()
5463 ice_setup_mc_magic_wake(pf); in ice_remove()
5464 ice_set_wake(pf); in ice_remove()
5475 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_shutdown() local
5480 pci_wake_from_d3(pdev, pf->wol_ena); in ice_shutdown()
5487 * @pf: board private structure
5491 static void ice_prepare_for_shutdown(struct ice_pf *pf) in ice_prepare_for_shutdown() argument
5493 struct ice_hw *hw = &pf->hw; in ice_prepare_for_shutdown()
5498 ice_vc_notify_reset(pf); in ice_prepare_for_shutdown()
5500 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); in ice_prepare_for_shutdown()
5503 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_shutdown()
5505 ice_for_each_vsi(pf, v) in ice_prepare_for_shutdown()
5506 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5507 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5514 * @pf: board private structure to reinitialize
5522 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) in ice_reinit_interrupt_scheme() argument
5524 struct device *dev = ice_pf_to_dev(pf); in ice_reinit_interrupt_scheme()
5531 ret = ice_init_interrupt_scheme(pf); in ice_reinit_interrupt_scheme()
5538 ice_for_each_vsi(pf, v) { in ice_reinit_interrupt_scheme()
5539 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5542 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5545 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5547 ice_vsi_set_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5551 ret = ice_req_irq_msix_misc(pf); in ice_reinit_interrupt_scheme()
5562 if (pf->vsi[v]) { in ice_reinit_interrupt_scheme()
5564 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5566 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
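
The function above follows the usual forward-allocate/reverse-unwind idiom: per-VSI vectors are built walking forward, and on failure only the VSIs already processed are torn down. A sketch, assuming v and ret are declared as in the original:

	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
		if (ret)
			goto err_unroll;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
		ice_vsi_set_napi_queues(pf->vsi[v]);
	}

	ret = ice_req_irq_msix_misc(pf);
	if (ret)
		goto err_unroll;
	return 0;

err_unroll:
	/* free only the VSIs already processed, in reverse */
	while (v--)
		if (pf->vsi[v]) {
			ice_vsi_clear_napi_queues(pf->vsi[v]);
			ice_vsi_free_q_vectors(pf->vsi[v]);
		}
	return ret;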
5582 struct ice_pf *pf; in ice_suspend() local
5585 pf = pci_get_drvdata(pdev); in ice_suspend()
5587 if (!ice_pf_state_is_nominal(pf)) { in ice_suspend()
5598 disabled = ice_service_task_stop(pf); in ice_suspend()
5600 ice_deinit_rdma(pf); in ice_suspend()
5603 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { in ice_suspend()
5605 ice_service_task_restart(pf); in ice_suspend()
5609 if (test_bit(ICE_DOWN, pf->state) || in ice_suspend()
5610 ice_is_reset_in_progress(pf->state)) { in ice_suspend()
5613 ice_service_task_restart(pf); in ice_suspend()
5617 ice_setup_mc_magic_wake(pf); in ice_suspend()
5619 ice_prepare_for_shutdown(pf); in ice_suspend()
5621 ice_set_wake(pf); in ice_suspend()
5628 ice_free_irq_msix_misc(pf); in ice_suspend()
5629 ice_for_each_vsi(pf, v) { in ice_suspend()
5630 if (!pf->vsi[v]) in ice_suspend()
5633 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_suspend()
5635 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5637 ice_clear_interrupt_scheme(pf); in ice_suspend()
5640 pci_wake_from_d3(pdev, pf->wol_ena); in ice_suspend()
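
Condensed, the suspend path above quiesces software first, then programs wake-up, then frees every interrupt resource so resume can rebuild the scheme from scratch. A simplified sketch (error handling and the nominal-state check are elided, an assumption):

	disabled = ice_service_task_stop(pf);
	ice_deinit_rdma(pf);

	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;		/* already suspended */
	}

	ice_setup_mc_magic_wake(pf);
	ice_prepare_for_shutdown(pf);
	ice_set_wake(pf);

	/* release IRQs so resume can rebuild them from scratch */
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ice_vsi_clear_napi_queues(pf->vsi[v]);
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_clear_interrupt_scheme(pf);

	pci_wake_from_d3(pdev, pf->wol_ena);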
5653 struct ice_pf *pf; in ice_resume() local
5670 pf = pci_get_drvdata(pdev); in ice_resume()
5671 hw = &pf->hw; in ice_resume()
5673 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_resume()
5674 ice_print_wake_reason(pf); in ice_resume()
5679 ret = ice_reinit_interrupt_scheme(pf); in ice_resume()
5683 ret = ice_init_rdma(pf); in ice_resume()
5688 clear_bit(ICE_DOWN, pf->state); in ice_resume()
5689 /* Now perform PF reset and rebuild */ in ice_resume()
5692 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_resume()
5694 if (ice_schedule_reset(pf, reset_type)) in ice_resume()
5697 clear_bit(ICE_SUSPENDED, pf->state); in ice_resume()
5698 ice_service_task_restart(pf); in ice_resume()
5701 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_resume()
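
Resume mirrors that teardown: latch the wake-up cause, rebuild interrupts while no traffic can flow, then let a scheduled PF reset rebuild the data path. A sketch of the ordering above (dev is assumed to be ice_pf_to_dev(pf); the ICE_RESET_PFR choice follows the inline comment):

	pf->wakeup_reason = rd32(hw, PFPM_WUS);
	ice_print_wake_reason(pf);

	/* interrupts must exist again before any reset work runs */
	ret = ice_reinit_interrupt_scheme(pf);
	if (ret)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	ret = ice_init_rdma(pf);
	if (ret)
		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n", ret);

	clear_bit(ICE_DOWN, pf->state);
	clear_bit(ICE_SERVICE_DIS, pf->state);

	/* a scheduled PF reset rebuilds the data path */
	if (ice_schedule_reset(pf, ICE_RESET_PFR))
		dev_err(dev, "Reset during resume failed.\n");

	clear_bit(ICE_SUSPENDED, pf->state);
	ice_service_task_restart(pf);
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));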
5717 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_detected() local
5719 if (!pf) { in ice_pci_err_detected()
5725 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_detected()
5726 ice_service_task_stop(pf); in ice_pci_err_detected()
5728 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_detected()
5729 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_detected()
5730 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_detected()
5746 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_slot_reset() local
5763 reg = rd32(&pf->hw, GLGEN_RTRIG); in ice_pci_err_slot_reset()
5782 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_resume() local
5784 if (!pf) { in ice_pci_err_resume()
5790 if (test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_resume()
5796 ice_restore_all_vfs_msi_state(pf); in ice_pci_err_resume()
5798 ice_do_reset(pf, ICE_RESET_PFR); in ice_pci_err_resume()
5799 ice_service_task_restart(pf); in ice_pci_err_resume()
5800 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_pci_err_resume()
5809 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_reset_prepare() local
5811 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_reset_prepare()
5812 ice_service_task_stop(pf); in ice_pci_err_reset_prepare()
5814 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_reset_prepare()
5815 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_reset_prepare()
5816 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_reset_prepare()
5989 struct ice_pf *pf = vsi->back; in ice_set_mac_address() local
5990 struct ice_hw *hw = &pf->hw; in ice_set_mac_address()
6002 if (test_bit(ICE_DOWN, pf->state) || in ice_set_mac_address()
6003 ice_is_reset_in_progress(pf->state)) { in ice_set_mac_address()
6009 if (ice_chnl_dmac_fltr_cnt(pf)) { in ice_set_mac_address()
6321 * @vsi: PF's VSI
6337 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6338 * @vsi: PF's VSI
6385 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6386 * @vsi: PF's VSI
6456 * ice_set_loopback - turn on/off loopback mode on underlying PF
6492 struct ice_pf *pf = vsi->back; in ice_set_features() local
6496 if (ice_is_safe_mode(pf)) { in ice_set_features()
6497 dev_err(ice_pf_to_dev(pf), in ice_set_features()
6503 if (ice_is_reset_in_progress(pf->state)) { in ice_set_features()
6504 dev_err(ice_pf_to_dev(pf), in ice_set_features()
6544 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { in ice_set_features()
6545 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); in ice_set_features()
6552 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); in ice_set_features()
6561 if (ice_is_feature_supported(pf, ICE_F_GCS) && in ice_set_features()
6564 dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n"); in ice_set_features()
6566 dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n"); in ice_set_features()
6574 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6769 struct ice_pf *pf = vsi->back; in ice_up_complete() local
6793 ice_ptp_link_change(pf, true); in ice_up_complete()
6802 ice_service_task_schedule(pf); in ice_up_complete()
6885 struct ice_pf *pf = vsi->back; in ice_update_vsi_ring_stats() local
6932 * random value after PF reset. And as we increase the reported stat by in ice_update_vsi_ring_stats()
6936 if (likely(pf->stat_prev_loaded)) { in ice_update_vsi_ring_stats()
6959 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats() local
6962 test_bit(ICE_CFG_BUSY, pf->state)) in ice_update_vsi_stats()
6978 cur_ns->rx_crc_errors = pf->stats.crc_errors; in ice_update_vsi_stats()
6979 cur_ns->rx_errors = pf->stats.crc_errors + in ice_update_vsi_stats()
6980 pf->stats.illegal_bytes + in ice_update_vsi_stats()
6981 pf->stats.rx_undersize + in ice_update_vsi_stats()
6982 pf->hw_csum_rx_error + in ice_update_vsi_stats()
6983 pf->stats.rx_jabber + in ice_update_vsi_stats()
6984 pf->stats.rx_fragments + in ice_update_vsi_stats()
6985 pf->stats.rx_oversize; in ice_update_vsi_stats()
6987 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; in ice_update_vsi_stats()
6992 * ice_update_pf_stats - Update PF port stats counters
6993  * @pf: PF whose stats need to be updated					in ice_update_pf_stats()
6995 void ice_update_pf_stats(struct ice_pf *pf) in ice_update_pf_stats() argument
6998 struct ice_hw *hw = &pf->hw; in ice_update_pf_stats()
7003 prev_ps = &pf->stats_prev; in ice_update_pf_stats()
7004 cur_ps = &pf->stats; in ice_update_pf_stats()
7006 if (ice_is_reset_in_progress(pf->state)) in ice_update_pf_stats()
7007 pf->stat_prev_loaded = false; in ice_update_pf_stats()
7009 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7013 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7017 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7021 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7025 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, in ice_update_pf_stats()
7029 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7033 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7037 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7041 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7045 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7049 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7052 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7055 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7058 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7061 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7064 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7067 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7070 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7073 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7076 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7079 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7082 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7085 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7088 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7095 pf->stat_prev_loaded, &prev_ps->fd_sb_match, in ice_update_pf_stats()
7097 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7100 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7103 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7106 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7109 ice_update_dcb_stats(pf); in ice_update_pf_stats()
7111 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7114 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7117 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7121 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7125 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7128 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7131 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7134 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7137 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; in ice_update_pf_stats()
7139 pf->stat_prev_loaded = true; in ice_update_pf_stats()
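
Every ice_stat_update40()/ice_stat_update32() call above follows the same contract: the hardware counters are free-running and narrow, so each read accumulates the delta since the previous snapshot, and the first read after a reset only latches a baseline (gated by stat_prev_loaded) so leftover register garbage is never reported. A hypothetical helper sketching those assumed semantics:

	/* Hypothetical sketch of the ice_stat_update40() contract (not the
	 * driver's implementation): accumulate the delta since the last
	 * snapshot, handling 40-bit rollover; when prev_loaded is false
	 * (first read after reset) only latch the sample.
	 */
	static void stat_update40_sketch(u64 hw_val, bool prev_loaded,
					 u64 *prev_stat, u64 *cur_stat)
	{
		if (!prev_loaded)
			*prev_stat = hw_val;

		if (hw_val >= *prev_stat)
			*cur_stat += hw_val - *prev_stat;
		else
			/* 40-bit counter wrapped since the last read */
			*cur_stat += (hw_val + BIT_ULL(40)) - *prev_stat;

		*prev_stat = hw_val;
	}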
7211 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq() local
7212 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
7407 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl() local
7411 dev = ice_pf_to_dev(pf); in ice_vsi_open_ctrl()
7463 struct ice_pf *pf = vsi->back; in ice_vsi_open() local
7480 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
7520 * @pf: PF from which all VSIs are being removed
7522 static void ice_vsi_release_all(struct ice_pf *pf) in ice_vsi_release_all() argument
7526 if (!pf->vsi) in ice_vsi_release_all()
7529 ice_for_each_vsi(pf, i) { in ice_vsi_release_all()
7530 if (!pf->vsi[i]) in ice_vsi_release_all()
7533 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7536 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7538 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", in ice_vsi_release_all()
7539 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7545 * @pf: pointer to the PF instance
7548 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7550 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) in ice_vsi_rebuild_by_type() argument
7552 struct device *dev = ice_pf_to_dev(pf); in ice_vsi_rebuild_by_type()
7555 ice_for_each_vsi(pf, i) { in ice_vsi_rebuild_by_type()
7556 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type()
7570 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7580 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7598 * ice_update_pf_netdev_link - Update PF netdev link status
7599 * @pf: pointer to the PF instance
7601 static void ice_update_pf_netdev_link(struct ice_pf *pf) in ice_update_pf_netdev_link() argument
7606 ice_for_each_vsi(pf, i) { in ice_update_pf_netdev_link()
7607 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link()
7612 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7614 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7615 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7617 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7618 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
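
The helper above just mirrors the queried link state into the stack's carrier and queue state. A minimal per-VSI sketch, simplified from the loop above:

	bool link_up = false;

	ice_get_link_status(vsi->port_info, &link_up);
	if (link_up) {
		netif_carrier_on(vsi->netdev);
		netif_tx_wake_all_queues(vsi->netdev);
	} else {
		netif_carrier_off(vsi->netdev);
		netif_tx_stop_all_queues(vsi->netdev);
	}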
7625 * @pf: PF to rebuild
7633 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_rebuild() argument
7635 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_rebuild()
7636 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild()
7637 struct ice_hw *hw = &pf->hw; in ice_rebuild()
7641 if (test_bit(ICE_DOWN, pf->state)) in ice_rebuild()
7644 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); in ice_rebuild()
7652 pf->fw_emp_reset_disabled = false; in ice_rebuild()
7664 if (!ice_is_safe_mode(pf)) { in ice_rebuild()
7670 ice_load_pkg(NULL, pf); in ice_rebuild()
7675 dev_err(dev, "clear PF configuration failed %d\n", err); in ice_rebuild()
7701 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_rebuild()
7710 err = ice_req_irq_msix_misc(pf); in ice_rebuild()
7716 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7724 /* force guaranteed filter pool for PF */ in ice_rebuild()
7726 /* force shared filter pool for PF */ in ice_rebuild()
7731 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_rebuild()
7732 ice_dcb_rebuild(pf); in ice_rebuild()
7734 /* If the PF previously had enabled PTP, PTP init needs to happen before in ice_rebuild()
7738 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7739 ice_ptp_rebuild(pf, reset_type); in ice_rebuild()
7741 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_rebuild()
7742 ice_gnss_init(pf); in ice_rebuild()
7744 /* rebuild PF VSI */ in ice_rebuild()
7745 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); in ice_rebuild()
7747 dev_err(dev, "PF VSI rebuild failed: %d\n", err); in ice_rebuild()
7752 err = ice_rebuild_channels(pf); in ice_rebuild()
7761 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7762 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); in ice_rebuild()
7773 ice_fdir_replay_fltrs(pf); in ice_rebuild()
7775 ice_rebuild_arfs(pf); in ice_rebuild()
7781 ice_update_pf_netdev_link(pf); in ice_rebuild()
7784 err = ice_send_version(pf); in ice_rebuild()
7794 clear_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7796 ice_health_clear(pf); in ice_rebuild()
7798 ice_plug_aux_dev(pf); in ice_rebuild()
7799 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) in ice_rebuild()
7800 ice_lag_rebuild(pf); in ice_rebuild()
7803 ice_ptp_restore_timestamp_mode(pf); in ice_rebuild()
7811 set_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7813 /* set this bit in PF state to control service task scheduling */ in ice_rebuild()
7814 set_bit(ICE_NEEDS_RESTART, pf->state); in ice_rebuild()
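
A condensed sketch of the rebuild order above (error paths and several steps elided, an assumption): per the inline comment, PTP is brought back before any VSI rebuild so Tx timestamping reattaches to a live clock, and the PF VSI comes back before the ADQ channels and flow-director control VSI that hang off it.

	if (!ice_is_safe_mode(pf))
		ice_load_pkg(NULL, pf);		/* reload the DDP package */

	err = ice_req_irq_msix_misc(pf);	/* misc/OICR interrupt first */

	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		ice_dcb_rebuild(pf);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_rebuild(pf, reset_type);	/* before any VSI */

	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
	err = ice_rebuild_channels(pf);			/* ADQ, if configured */

	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
		ice_fdir_replay_fltrs(pf);
	}

	ice_update_pf_netdev_link(pf);
	err = ice_send_version(pf);		/* tell FW the rebuild survived */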
7829 struct ice_pf *pf = vsi->back; in ice_change_mtu() local
7848 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { in ice_change_mtu()
7858 if (ice_is_reset_in_progress(pf->state)) { in ice_change_mtu()
7878 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); in ice_change_mtu()
7892 struct ice_pf *pf = np->vsi->back; in ice_eth_ioctl() local
7896 return ice_ptp_get_ts_config(pf, ifr); in ice_eth_ioctl()
7898 return ice_ptp_set_ts_config(pf, ifr); in ice_eth_ioctl()
8122 struct ice_pf *pf = vsi->back; in ice_bridge_getlink() local
8125 bmode = pf->first_sw->bridge_mode; in ice_bridge_getlink()
8183 * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
8193 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink() local
8195 struct ice_hw *hw = &pf->hw; in ice_bridge_setlink()
8199 pf_sw = pf->first_sw; in ice_bridge_setlink()
8213  /* Iterates through the PF VSI list and updates the loopback			in ice_bridge_setlink()
8216 ice_for_each_vsi(pf, v) { in ice_bridge_setlink()
8217 if (!pf->vsi[v]) in ice_bridge_setlink()
8219 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
8254 struct ice_pf *pf = vsi->back; in ice_tx_timeout() local
8257 pf->tx_timeout_count++; in ice_tx_timeout()
8263 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { in ice_tx_timeout()
8264 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", in ice_tx_timeout()
8280 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) in ice_tx_timeout()
8281 pf->tx_timeout_recovery_level = 1; in ice_tx_timeout()
8282 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + in ice_tx_timeout()
8287 struct ice_hw *hw = &pf->hw; in ice_tx_timeout()
8299 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); in ice_tx_timeout()
8302 pf->tx_timeout_last_recovery = jiffies; in ice_tx_timeout()
8304 pf->tx_timeout_recovery_level, txqueue); in ice_tx_timeout()
8306 switch (pf->tx_timeout_recovery_level) { in ice_tx_timeout()
8308 set_bit(ICE_PFR_REQ, pf->state); in ice_tx_timeout()
8311 set_bit(ICE_CORER_REQ, pf->state); in ice_tx_timeout()
8314 set_bit(ICE_GLOBR_REQ, pf->state); in ice_tx_timeout()
8318 set_bit(ICE_DOWN, pf->state); in ice_tx_timeout()
8320 set_bit(ICE_SERVICE_DIS, pf->state); in ice_tx_timeout()
8324 ice_service_task_schedule(pf); in ice_tx_timeout()
8325 pf->tx_timeout_recovery_level++; in ice_tx_timeout()
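
The recovery levels above escalate from a PF reset to a core reset to a global reset before giving up. A sketch of that switch, reconstructed from the fragments above (the netdev_err message in the default case is an assumption):

	switch (pf->tx_timeout_recovery_level) {
	case 1:					/* first timeout: PF reset */
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:					/* then: core reset */
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:					/* then: global reset */
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:				/* give up: take the port down */
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;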
8388 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt() local
8403 dev = ice_pf_to_dev(pf); in ice_validate_mqprio_qopt()
8515 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8516 * @pf: ptr to PF device
8519 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) in ice_add_vsi_to_fdir() argument
8521 struct device *dev = ice_pf_to_dev(pf); in ice_add_vsi_to_fdir()
8529 hw = &pf->hw; in ice_add_vsi_to_fdir()
8576 * @pf: ptr to PF device
8582 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) in ice_add_channel() argument
8584 struct device *dev = ice_pf_to_dev(pf); in ice_add_channel()
8592 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
8598 ice_add_vsi_to_fdir(pf, vsi); in ice_add_channel()
8690 * @pf: ptr to PF device
8700 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_hw_channel() argument
8703 struct device *dev = ice_pf_to_dev(pf); in ice_setup_hw_channel()
8709 ret = ice_add_channel(pf, sw_id, ch); in ice_setup_hw_channel()
8730 * @pf: ptr to PF device
8738 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_channel() argument
8741 struct device *dev = ice_pf_to_dev(pf); in ice_setup_channel()
8750 sw_id = pf->first_sw->sw_id; in ice_setup_channel()
8753 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); in ice_setup_channel()
8791 struct ice_pf *pf = vsi->back; in ice_create_q_channel() local
8797 dev = ice_pf_to_dev(pf); in ice_create_q_channel()
8809 if (!ice_setup_channel(pf, vsi, ch)) { in ice_create_q_channel()
8834  * @pf: ptr to PF, TC-flower based filters are tracked at the PF level	in ice_rem_all_chnl_fltrs()
8839 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) in ice_rem_all_chnl_fltrs() argument
8846 &pf->tc_flower_fltr_list, in ice_rem_all_chnl_fltrs()
8858 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); in ice_rem_all_chnl_fltrs()
8861 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", in ice_rem_all_chnl_fltrs()
8864 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", in ice_rem_all_chnl_fltrs()
8874 pf->num_dmac_chnl_fltrs--; in ice_rem_all_chnl_fltrs()
8893 struct ice_pf *pf = vsi->back; in ice_remove_q_channels() local
8898 ice_rem_all_chnl_fltrs(pf); in ice_remove_q_channels()
8902 struct ice_hw *hw = &pf->hw; in ice_remove_q_channels()
8940 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); in ice_remove_q_channels()
8945 /* Delete VSI from FW, PF and HW VSI arrays */ in ice_remove_q_channels()
8963 * @pf: ptr to PF
8967 static int ice_rebuild_channels(struct ice_pf *pf) in ice_rebuild_channels() argument
8969 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild_channels()
8977 main_vsi = ice_get_main_vsi(pf); in ice_rebuild_channels()
8981 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || in ice_rebuild_channels()
8996 ice_for_each_vsi(pf, i) { in ice_rebuild_channels()
8999 vsi = pf->vsi[i]; in ice_rebuild_channels()
9016 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
9019 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
9083 struct ice_pf *pf = vsi->back; in ice_create_q_channels() local
9113 dev_err(ice_pf_to_dev(pf), in ice_create_q_channels()
9120 dev_dbg(ice_pf_to_dev(pf), in ice_create_q_channels()
9141 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc() local
9148 dev = ice_pf_to_dev(pf); in ice_setup_tc_mqprio_qdisc()
9153 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9166 if (pf->hw.port_info->is_custom_tx_enabled) { in ice_setup_tc_mqprio_qdisc()
9170 ice_tear_down_devlink_rate_tree(pf); in ice_setup_tc_mqprio_qdisc()
9179 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9185 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_setup_tc_mqprio_qdisc()
9201 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_setup_tc_mqprio_qdisc()
9204 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9205 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
9207 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
9244 clear_bit(ICE_RESET_FAILED, pf->state); in ice_setup_tc_mqprio_qdisc()
9260 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9313 struct ice_pf *pf = np->vsi->back; in ice_setup_tc() local
9324 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_setup_tc()
9329 if (pf->adev) { in ice_setup_tc()
9330 mutex_lock(&pf->adev_mutex); in ice_setup_tc()
9331 device_lock(&pf->adev->dev); in ice_setup_tc()
9333 if (pf->adev->dev.driver) { in ice_setup_tc()
9341 mutex_lock(&pf->tc_mutex); in ice_setup_tc()
9343 mutex_unlock(&pf->tc_mutex); in ice_setup_tc()
9347 device_unlock(&pf->adev->dev); in ice_setup_tc()
9348 mutex_unlock(&pf->adev_mutex); in ice_setup_tc()
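
The adev handling above orders the auxiliary-device locks ahead of tc_mutex, so a bound RDMA driver blocks TC reconfiguration. A sketch of that nesting reconstructed from the fragments (the -EBUSY value and the callee placement are assumptions):

	bool locked = false;
	int err = 0;

	if (pf->adev) {
		mutex_lock(&pf->adev_mutex);
		device_lock(&pf->adev->dev);
		locked = true;
		if (pf->adev->dev.driver) {
			/* RDMA driver is bound; refuse to change TC config */
			err = -EBUSY;
			goto adev_unlock;
		}
	}

	mutex_lock(&pf->tc_mutex);
	err = ice_setup_tc_mqprio_qdisc(netdev, type_data);	/* assumed callee */
	mutex_unlock(&pf->tc_mutex);

adev_unlock:
	if (locked) {
		device_unlock(&pf->adev->dev);
		mutex_unlock(&pf->adev_mutex);
	}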
9487 struct ice_pf *pf = np->vsi->back; in ice_open() local
9489 if (ice_is_reset_in_progress(pf->state)) { in ice_open()
9510 struct ice_pf *pf = vsi->back; in ice_open_internal() local
9514 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_open_internal()
9528 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_open_internal()
9532 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9533 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { in ice_open_internal()
9549 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9578 struct ice_pf *pf = vsi->back; in ice_stop() local
9580 if (ice_is_reset_in_progress(pf->state)) { in ice_stop()