// SPDX-License-Identifier: GPL-2.0

 * ice_vsi_type_str - maps VSI type enum to string equivalents

 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @ena: start or stop the Rx rings
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * let all of the register writes complete when enabling/disabling the Rx rings

	ice_flush(&vsi->back->hw);
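/* A minimal sketch of the pattern the kernel-doc above describes, assuming
 * the per-ring helpers ice_vsi_ctrl_one_rx_ring() and
 * ice_vsi_wait_one_rx_ring() used elsewhere in this driver: toggle every
 * ring first, post the register writes, then wait on each queue separately.
 *
 *	ice_for_each_rxq(vsi, i)
 *		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
 *
 *	ice_flush(&vsi->back->hw);
 *
 *	ice_for_each_rxq(vsi, i) {
 *		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
 *		if (ret)
 *			break;
 *	}
 */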
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI

	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)

	/* txq_map needs to have enough space to track both Tx (stack) rings
	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
				    sizeof(*vsi->txq_map), GFP_KERNEL);
	if (!vsi->txq_map)

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)

	if (vsi->type == ICE_VSI_LB)

	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)

	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
	if (!vsi->af_xdp_zc_qps)

	devm_kfree(dev, vsi->q_vectors);
	devm_kfree(dev, vsi->rxq_map);
	devm_kfree(dev, vsi->txq_map);
	devm_kfree(dev, vsi->rx_rings);
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
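/* The devm_kfree() calls above are the tail of a goto-based unwind: each
 * failed allocation jumps past its own free, so everything allocated
 * earlier is released in reverse order before the single -ENOMEM return.
 */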
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI

	switch (vsi->type) {
		 * ethtool -G so we should keep those values instead of
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;

		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);

 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI

	enum ice_vsi_type vsi_type = vsi->type;
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = vsi->vf;

		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;

			vsi->alloc_txq = min3(pf->num_lan_msix,

		pf->num_lan_tx = vsi->alloc_txq;

		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;

			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;

				vsi->alloc_rxq = min3(pf->num_lan_msix,

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
					   max_t(int, vsi->alloc_rxq,
						 vsi->alloc_txq));

		 * Tx and Rx rings are always equal
		if (vsi->req_txq && vsi->req_rxq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->alloc_rxq = vsi->req_rxq;

			vsi->alloc_txq = 1;
			vsi->alloc_rxq = 1;

		vsi->num_q_vectors = 1;

		if (vf->num_req_qs)
			vf->num_vf_qs = vf->num_req_qs;
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is number
		vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;

		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;

		vsi->alloc_txq = 0;
		vsi->alloc_rxq = 0;

		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;

 * ice_get_free_slot - get the next non-NULL location index in array

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
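/* Fast path: the slot right after 'curr' is free, so curr + 1 is returned.
 * Otherwise (in code not shown here) the function falls back to a linear
 * scan of the array and returns ICE_NO_VSI when every slot is taken.
 */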
 * ice_vsi_delete_from_hw - delete a VSI from the switch

	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);

		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI

	struct ice_pf *pf = vsi->back;

	bitmap_free(vsi->af_xdp_zc_qps);
	vsi->af_xdp_zc_qps = NULL;
	devm_kfree(dev, vsi->q_vectors);
	vsi->q_vectors = NULL;
	devm_kfree(dev, vsi->tx_rings);
	vsi->tx_rings = NULL;
	devm_kfree(dev, vsi->rx_rings);
	vsi->rx_rings = NULL;
	devm_kfree(dev, vsi->txq_map);
	vsi->txq_map = NULL;
	devm_kfree(dev, vsi->rxq_map);
	vsi->rxq_map = NULL;

 * ice_vsi_free_stats - Free the ring statistics structures

	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)
	if (!pf->vsi_stats)

	vsi_stat = pf->vsi_stats[vsi->idx];

		if (vsi_stat->tx_ring_stats[i]) {
			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);

		if (vsi_stat->rx_ring_stats[i]) {
			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);

	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat->rx_ring_stats);
	pf->vsi_stats[vsi->idx] = NULL;

 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI

	struct ice_pf *pf = vsi->back;

	vsi_stats = pf->vsi_stats[vsi->idx];
	tx_ring_stats = vsi_stats->tx_ring_stats;
	rx_ring_stats = vsi_stats->rx_ring_stats;

	/* Allocate Tx ring stats */
		ring = vsi->tx_rings[i];
		ring->ring_stats = ring_stats;

		ring = vsi->rx_rings[i];
		ring->ring_stats = ring_stats;

	return -ENOMEM;

 * ice_vsi_free - clean up and deallocate the provided VSI

	if (!vsi || !vsi->back)

	pf = vsi->back;

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);

	mutex_lock(&pf->sw_mutex);

	pf->vsi[vsi->idx] = NULL;
	pf->next_vsi = vsi->idx;

	mutex_unlock(&pf->sw_mutex);

 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI

	if (!q_vector->tx.tx_ring)

	ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);

 * ice_msix_clean_rings - MSIX mode Interrupt Handler

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)

	q_vector->total_events++;

	napi_schedule(&q_vector->napi);

	struct ice_pf *pf = q_vector->vsi->back;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)

	xa_for_each(&pf->eswitch.reprs, id, repr)
		napi_schedule(&repr->q_vector->napi);

 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays

	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)
	if (!pf->vsi_stats)
		return -ENOENT;

	if (pf->vsi_stats[vsi->idx])

		return -ENOMEM;

	vsi_stat->tx_ring_stats =
		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
	if (!vsi_stat->tx_ring_stats)

	vsi_stat->rx_ring_stats =
		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
	if (!vsi_stat->rx_ring_stats)

	pf->vsi_stats[vsi->idx] = vsi_stat;

	kfree(vsi_stat->rx_ring_stats);
	kfree(vsi_stat->tx_ring_stats);
	pf->vsi_stats[vsi->idx] = NULL;
	return -ENOMEM;

 * ice_vsi_alloc_def - set default values for already allocated VSI

	if (vsi->type != ICE_VSI_CHNL) {

			return -ENOMEM;

	switch (vsi->type) {
		vsi->irq_handler = ice_eswitch_msix_clean_rings;

		vsi->irq_handler = ice_msix_clean_rings;

		vsi->irq_handler = ice_msix_clean_ctrl_vsi;

			return -EINVAL;
		vsi->num_rxq = ch->num_rxq;
		vsi->num_txq = ch->num_txq;
		vsi->next_base_q = ch->base_q;

		return -EINVAL;

 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * Reserves a VSI index from the PF and allocates an empty VSI structure

	mutex_lock(&pf->sw_mutex);

	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	if (pf->next_vsi == ICE_NO_VSI) {

	vsi->back = pf;
	set_bit(ICE_VSI_DOWN, vsi->state);

	vsi->idx = pf->next_vsi;
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);

	mutex_unlock(&pf->sw_mutex);

 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * Returns 0 on success, -EPERM on no-op or -EIO on failure

	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
	      vsi->type == ICE_VSI_CHNL))
		return -EPERM;

	g_val = pf->hw.func_caps.fd_fltr_guar;

		return -EPERM;

	b_val = pf->hw.func_caps.fd_fltr_best_effort;

		return -EPERM;

	if (vsi->type == ICE_VSI_PF) {
		vsi->num_gfltr = g_val;

		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {

				return -EPERM;

			vsi->num_gfltr = ICE_PF_VSI_GFLTR;

		vsi->num_bfltr = b_val;
	} else if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		vsi->num_bfltr = b_val;

			return -EPERM;

		if (!main_vsi->all_numtc)
			return -EINVAL;

		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;

			return -EPERM;

		g_val -= ICE_PF_VSI_GFLTR;

		vsi->num_gfltr = g_val / numtc;

		vsi->num_bfltr = b_val;
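/* Worked example with illustrative numbers: given g_val = 128 guaranteed
 * filters, ICE_PF_VSI_GFLTR of them reserved for the PF VSI, and
 * numtc = 4 ADQ traffic classes, each channel VSI is granted
 * (128 - ICE_PF_VSI_GFLTR) / 4 guaranteed filters plus the shared
 * best-effort pool b_val.
 */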
 * ice_vsi_get_qs - Assign queues from PF to VSI

	struct ice_pf *pf = vsi->back;
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.vsi_map = vsi->txq_map,

		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.vsi_map = vsi->rxq_map,

	if (vsi->type == ICE_VSI_CHNL)

	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

 * ice_vsi_put_qs - Release queues from VSI to PF

	struct ice_pf *pf = vsi->back;

	mutex_lock(&pf->avail_q_mutex);

		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;

		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;

	mutex_unlock(&pf->avail_q_mutex);

	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);

	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);

 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration

	struct ice_pf *pf = vsi->back;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
			vsi->vsi_num, status);

 * ice_rss_clean - Delete RSS related VSI structures and configuration

	struct ice_pf *pf = vsi->back;

		devm_kfree(dev, vsi->rss_hkey_user);
		devm_kfree(dev, vsi->rss_lut_user);

		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);

 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type

	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;

	cap = &pf->hw.func_caps.common_cap;
	max_rss_size = BIT(cap->rss_table_entry_width);
	switch (vsi->type) {
		vsi->rss_table_size = (u16)cap->rss_table_size;
		if (vsi->type == ICE_VSI_CHNL)
			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);

			vsi->rss_size = min_t(u16, num_online_cpus(),
		vsi->rss_lut_type = ICE_LUT_PF;

		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
		vsi->rss_lut_type = ICE_LUT_VSI;

		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_LUT_VSI;

			ice_vsi_type_str(vsi->type));

 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI

	memset(&ctxt->info, 0, sizeof(ctxt->info));

	ctxt->alloc_from_pool = true;

	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;

	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* allow all untagged/tagged packets by default on Tx */
	ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
	 * DVM - leave inner VLAN in packet by default

		ctxt->info.inner_vlan_flags |=

		ctxt->info.outer_vlan_flags =

		ctxt->info.outer_vlan_flags |=

		ctxt->info.outer_vlan_flags |=

	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);

	ctxt->info.outer_up_table = cpu_to_le32(table);

 * ice_vsi_setup_q_map - Setup a VSI queue map

	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;

	if (!vsi->tc_cfg.numtc) {
		vsi->tc_cfg.numtc = 1;
		vsi->tc_cfg.ena_tc = 1;

	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);

	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;

	/* find the (rounded up) power-of-2 of qcount */
	 * queues allocated to TC0. The number of queues is a power-of-2.
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;

		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
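/* Illustrative qmap packing, assuming the ICE_AQ_VSI_TC_Q_OFFSET_M and
 * ICE_AQ_VSI_TC_Q_NUM_M fields of the TC mapping word: for offset = 8 and
 * qcount = 6 rounded up to pow = order_base_2(6) = 3,
 *
 *	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, 8) |
 *	       FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, 3);
 *
 * advertises queues 8..15 (2^3 = 8 queues) for this TC.
 */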
	/* if offset is non-zero, means it is calculated correctly based on
	 * be correct and non-zero because it is based off - VSI's

	if (rx_count > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
			rx_count, vsi->alloc_rxq);
		return -EINVAL;

	if (tx_count > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
			tx_count, vsi->alloc_txq);
		return -EINVAL;

	vsi->num_txq = tx_count;
	vsi->num_rxq = rx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		vsi->num_txq = vsi->num_rxq;

	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
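/* With ICE_AQ_VSI_Q_MAP_CONTIG the hardware only needs the first absolute
 * Rx queue and the queue count: q_mapping[0] names rxq_map[0] and
 * q_mapping[1] holds num_rxq, so queues rxq_map[0] .. rxq_map[0] +
 * num_rxq - 1 are implied.
 */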
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)

	ctxt->info.valid_sections |= cpu_to_le16(val);

	ctxt->info.fd_options = cpu_to_le16(val);

	ctxt->info.max_fd_fltr_dedicated =
		cpu_to_le16(vsi->num_gfltr);

	ctxt->info.max_fd_fltr_shared =
		cpu_to_le16(vsi->num_bfltr);

	ctxt->info.fd_def_q = cpu_to_le16(val);

	ctxt->info.fd_report_opt = cpu_to_le16(val);

 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI

	pf = vsi->back;

	switch (vsi->type) {
			ice_vsi_type_str(vsi->type));

	vsi->rss_hfunc = hash_type;

	ctxt->info.q_opt_rss =

	struct ice_pf *pf = vsi->back;

	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);

	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);

 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not

	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

 * ice_vsi_init - Create and initialize a VSI

	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

		return -ENOMEM;

	switch (vsi->type) {
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;

		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;

		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;

		ret = -ENODEV;

	if (vsi->type == ICE_VSI_CHNL) {

		ctxt->info.sw_flags2 |=

		ctxt->info.sw_flags2 &=

	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))

	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {

		ctxt->info.valid_sections |=

	ctxt->info.sw_id = vsi->port_info->sw_id;
	if (vsi->type == ICE_VSI_CHNL) {

		ctxt->info.valid_sections |=

	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=

		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);

			ret = -EIO;

		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);

			ret = -EIO;

	vsi->info = ctxt->info;

	vsi->vsi_num = ctxt->vsi_num;

 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated

	if (vsi->q_vectors) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

				q_vector->tx.tx_ring = NULL;
				q_vector->rx.rx_ring = NULL;

	if (vsi->tx_rings) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);

	if (vsi->rx_rings) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);

 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated

	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
	struct ice_pf *pf = vsi->back;

	/* Allocate Tx rings */
		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->vsi = vsi;
		ring->tx_tstamps = &pf->ptp.port.tx;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		ring->txq_teid = ICE_INVAL_TEID;

			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;

			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
		WRITE_ONCE(vsi->tx_rings[i], ring);

	/* Allocate Rx rings */
		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		ring->cached_phctime = pf->ptp.cached_phc_time;
		WRITE_ONCE(vsi->rx_rings[i], ring);
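/* WRITE_ONCE() publishes each ring pointer so lockless RCU readers see
 * either NULL or a fully initialized ring; the matching teardown in
 * ice_vsi_clear_rings() frees rings with kfree_rcu() for the same reason.
 */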
	return -ENOMEM;

 * ice_vsi_manage_rss_lut - disable/enable RSS

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);

		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);

			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);

	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);

 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI

			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;

			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;

 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI

	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);

		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
		 * orig_rss_size so that when tc-qdisc is deleted, main VSI
		 * to begin with (prior to setup-tc for ADQ config)
		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
		    vsi->orig_rss_size <= vsi->num_rxq) {
			vsi->rss_size = vsi->orig_rss_size;
			vsi->orig_rss_size = 0;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);

		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);

		err = -ENOMEM;

	if (vsi->rss_hkey_user)
		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows

	struct ice_pf *pf = vsi->back;
			vsi->vsi_num);

	status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
			vsi->vsi_num, status);

	/* configure RSS for sctp4 with input set IP src/dst - only support
	 * RSS on SCTPv4 on outer headers (non-tunneled)

	/* configure RSS for sctp6 with input set IPv6 src/dst - only support
	 * RSS on SCTPv6 on outer headers (non-tunneled)

 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows

	u16 vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

					 cfg->addl_hdrs, cfg->hash_flds,
					 cfg->hdr_type, cfg->symm);

 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length

	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
		vsi->rx_buf_len = ICE_RXBUF_1664;
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;

		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_3072;
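/* Net effect of the branches above: legacy-rx forces the 1664-byte
 * single-buffer path, an MTU of at most ETH_DATA_LEN fits in 1536-byte
 * buffers (minus NET_IP_ALIGN), and anything larger falls through to
 * 3072-byte buffers with the maximum supported frame size.
 */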
 * ice_pf_state_is_nominal - checks the PF for nominal state

	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))

 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters

	struct ice_hw *hw = &vsi->back->hw;
	struct ice_pf *pf = vsi->back;
	u16 vsi_num = vsi->vsi_num;	/* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	if (ice_is_reset_in_progress(pf->state))
		vsi->stat_offsets_loaded = false;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
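/* The GLV_* counters are free-running in hardware and are never cleared by
 * the driver: ice_stat_update40()/ice_stat_update32() report the delta
 * against the values cached in prev_es, and stat_offsets_loaded gates the
 * first snapshot after a reset so the baseline is re-established.
 */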
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register

	if (q_idx >= vsi->num_rxq)
		return -EINVAL;

	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);

	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
		return -EINVAL;

	qg_buf->num_txqs = 1;

 * ice_vsi_cfg_rxqs - Configure the VSI for Rx

	if (vsi->type == ICE_VSI_VF)

	/* set up individual rings */
		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);

 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @rings: Tx ring array to be configured
 * @count: number of Tx ring array elements
 * Configure the Tx VSI for operation.
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)

	qg_buf->num_txqs = 1;

		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);

 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * Configure the Tx VSI for operation.

	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);

 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * Configure the Tx queues dedicated for XDP in given VSI for operation.

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);

 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value

 * ice_write_intrl - write throttle rate limit to interrupt specific register

	struct ice_hw *hw = &q_vector->vsi->back->hw;

	wr32(hw, GLINT_RATE(q_vector->reg_idx),

	switch (rc->type) {
		if (rc->rx_ring)
			return rc->rx_ring->q_vector;

		if (rc->tx_ring)
			return rc->tx_ring->q_vector;

 * __ice_write_itr - write throttle rate to register

	struct ice_hw *hw = &q_vector->vsi->back->hw;

	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),

 * ice_write_itr - write throttle rate to queue specific register

 * ice_set_q_vector_intrl - set up interrupt rate limiting
 * Interrupt rate limiting is local to the vector, not per-queue so we must

	if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {

		ice_write_intrl(q_vector, q_vector->intrl);
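/* Illustrative conversion (the granularity value is an assumption, not
 * taken from this excerpt): with a rate limit of 10 usec and a register
 * granularity of 2 usec, ice_intrl_usec_to_reg() would quantize the limit
 * to 10 / 2 = 5 before ice_write_intrl() above programs it into
 * GLINT_RATE.
 */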
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW

	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		 * For SR-IOV VF VSIs queue vector index always starts
		for (q = 0; q < q_vector->num_ring_tx; q++) {
					      q_vector->tx.itr_idx);

		for (q = 0; q < q_vector->num_ring_rx; q++) {
					      q_vector->rx.itr_idx);

 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
 * @vsi: the VSI whose rings are to be enabled

 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
 * @vsi: the VSI whose rings are to be disabled

 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @rings: Tx ring array to be stopped
 * @count: number of Tx ring array elements
		      u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

		if (!rings || !rings[q_idx])
			return -EINVAL;

		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
					   rings[q_idx], &txq_meta);

 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings

	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);

 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings

	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);

	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

		pf_q = vsi->rxq_map[i];

	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
		vsi->tc_cfg.numtc = 1;

 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
 * @tx: bool to determine Tx or Rx rule
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)

	struct ice_pf *pf = vsi->back;

	if (tx) {

		if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
			status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,

			create ? "adding" : "removing", tx ? "TX" : "RX",
			vsi->vsi_num, status);

 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it

	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_pf *pf = vsi->back;
	 * - PF aggregator node to contain VSIs of type _PF and _CTRL
2229 * - VF aggregator nodes will contain VF VSI in ice_set_agg_vsi()
2231 port_info = pf->hw.port_info; in ice_set_agg_vsi()
2235 switch (vsi->type) { in ice_set_agg_vsi()
2243 agg_node_iter = &pf->pf_agg_node[0]; in ice_set_agg_vsi()
2254 agg_node_iter = &pf->vf_agg_node[0]; in ice_set_agg_vsi()
2259 ice_vsi_type_str(vsi->type)); in ice_set_agg_vsi()
2268 if (agg_node_iter->num_vsis && in ice_set_agg_vsi()
2269 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { in ice_set_agg_vsi()
2274 if (agg_node_iter->valid && in ice_set_agg_vsi()
2275 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2276 agg_id = agg_node_iter->agg_id; in ice_set_agg_vsi()
2282 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2295 if (!agg_node->valid) { in ice_set_agg_vsi()
2297 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2304 agg_node->valid = true; in ice_set_agg_vsi()
2305 agg_node->agg_id = agg_id; in ice_set_agg_vsi()
2309 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, in ice_set_agg_vsi()
2310 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2313 vsi->idx, agg_id); in ice_set_agg_vsi()
2318 agg_node->num_vsis++; in ice_set_agg_vsi()
2320 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved in ice_set_agg_vsi()
2323 vsi->agg_node = agg_node; in ice_set_agg_vsi()
2325 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, in ice_set_agg_vsi()
2326 vsi->agg_node->num_vsis); in ice_set_agg_vsi()
2337 if (!(vsi->tc_cfg.ena_tc & BIT(i))) in ice_vsi_cfg_tc_lan()
2340 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_cfg_tc_lan()
2341 if (!vsi->alloc_txq && vsi->num_txq) in ice_vsi_cfg_tc_lan()
2342 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc_lan()
2344 max_txqs[i] = pf->num_lan_tx; in ice_vsi_cfg_tc_lan()
2346 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc_lan()
2349 if (vsi->type == ICE_VSI_PF) in ice_vsi_cfg_tc_lan()
2350 max_txqs[i] += vsi->num_xdp_txq; in ice_vsi_cfg_tc_lan()
2353 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); in ice_vsi_cfg_tc_lan()
2354 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_vsi_cfg_tc_lan()
2358 vsi->vsi_num, ret); in ice_vsi_cfg_tc_lan()
2366 * ice_vsi_cfg_def - configure default VSI based on the type
2373 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_cfg_def()
2374 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_def()
2377 vsi->vsw = pf->first_sw; in ice_vsi_cfg_def()
2379 ret = ice_vsi_alloc_def(vsi, params->ch); in ice_vsi_cfg_def()
2383 /* allocate memory for Tx/Rx ring stat pointers */ in ice_vsi_cfg_def()
2392 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", in ice_vsi_cfg_def()
2393 vsi->idx); in ice_vsi_cfg_def()
2404 ret = ice_vsi_init(vsi, params->flags); in ice_vsi_cfg_def()
2410 switch (vsi->type) { in ice_vsi_cfg_def()
2428 /* Associate q_vector rings to napi */ in ice_vsi_cfg_def()
2431 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2437 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); in ice_vsi_cfg_def()
2443 if (vsi->type != ICE_VSI_CTRL) in ice_vsi_cfg_def()
2448 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2455 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2478 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2484 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2501 ret = -EINVAL; in ice_vsi_cfg_def()
2523 * ice_vsi_cfg - configure a previously allocated VSI
2529 struct ice_pf *pf = vsi->back; in ice_vsi_cfg()
2532 if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) in ice_vsi_cfg()
2533 return -EINVAL; in ice_vsi_cfg()
2535 vsi->type = params->type; in ice_vsi_cfg()
2536 vsi->port_info = params->pi; in ice_vsi_cfg()
2539 vsi->vf = params->vf; in ice_vsi_cfg()
2545 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); in ice_vsi_cfg()
2549 if (vsi->type == ICE_VSI_CTRL) { in ice_vsi_cfg()
2550 if (vsi->vf) { in ice_vsi_cfg()
2551 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2552 vsi->vf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2554 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2555 pf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2563 * ice_vsi_decfg - remove all VSI configuration
2568 struct ice_pf *pf = vsi->back; in ice_vsi_decfg()
2574 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && in ice_vsi_decfg()
2575 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) in ice_vsi_decfg()
2578 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2579 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2582 vsi->vsi_num, err); in ice_vsi_decfg()
2595 /* SR-IOV determines needed MSIX resources all at once instead of per in ice_vsi_decfg()
2597 * many interrupts each VF needs. SR-IOV MSIX resources are also in ice_vsi_decfg()
2601 if (vsi->type == ICE_VSI_VF && in ice_vsi_decfg()
2602 vsi->agg_node && vsi->agg_node->valid) in ice_vsi_decfg()
2603 vsi->agg_node->num_vsis--; in ice_vsi_decfg()
2607 * ice_vsi_setup - Set up a VSI by a given type
2626 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || in ice_vsi_setup()
2627 WARN_ON(!params->pi)) in ice_vsi_setup()
2640 /* Add switch rule to drop all Tx Flow Control Frames, of look up in ice_vsi_setup()
2645 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to in ice_vsi_setup()
2649 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { in ice_vsi_setup()
2655 if (!vsi->agg_node) in ice_vsi_setup()
2667 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2672 struct ice_pf *pf = vsi->back; in ice_vsi_release_msix()
2673 struct ice_hw *hw = &pf->hw; in ice_vsi_release_msix()
2679 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_release_msix()
2682 for (q = 0; q < q_vector->num_ring_tx; q++) { in ice_vsi_release_msix()
2683 ice_write_itr(&q_vector->tx, 0); in ice_vsi_release_msix()
2684 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); in ice_vsi_release_msix()
2686 u32 xdp_txq = txq + vsi->num_xdp_txq; in ice_vsi_release_msix()
2688 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); in ice_vsi_release_msix()
2693 for (q = 0; q < q_vector->num_ring_rx; q++) { in ice_vsi_release_msix()
2694 ice_write_itr(&q_vector->rx, 0); in ice_vsi_release_msix()
2695 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); in ice_vsi_release_msix()
2704 * ice_vsi_free_irq - Free the IRQ association with the OS
2709 struct ice_pf *pf = vsi->back; in ice_vsi_free_irq()
2712 if (!vsi->q_vectors || !vsi->irqs_ready) in ice_vsi_free_irq()
2716 if (vsi->type == ICE_VSI_VF) in ice_vsi_free_irq()
2719 vsi->irqs_ready = false; in ice_vsi_free_irq()
2725 irq_num = vsi->q_vectors[i]->irq.virq; in ice_vsi_free_irq()
2728 if (!vsi->q_vectors[i] || in ice_vsi_free_irq()
2729 !(vsi->q_vectors[i]->num_ring_tx || in ice_vsi_free_irq()
2730 vsi->q_vectors[i]->num_ring_rx)) in ice_vsi_free_irq()
2740 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); in ice_vsi_free_irq()
2745 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2752 if (!vsi->tx_rings) in ice_vsi_free_tx_rings()
2756 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_vsi_free_tx_rings()
2757 ice_free_tx_ring(vsi->tx_rings[i]); in ice_vsi_free_tx_rings()
2761 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2768 if (!vsi->rx_rings) in ice_vsi_free_rx_rings()
2772 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in ice_vsi_free_rx_rings()
2773 ice_free_rx_ring(vsi->rx_rings[i]); in ice_vsi_free_rx_rings()
2777 * ice_vsi_close - Shut down a VSI
2782 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_vsi_close()
2791 * ice_ena_vsi - resume a VSI
2799 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) in ice_ena_vsi()
2802 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_ena_vsi()
2804 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_ena_vsi()
2805 if (netif_running(vsi->netdev)) { in ice_ena_vsi()
2809 err = ice_open_internal(vsi->netdev); in ice_ena_vsi()
2814 } else if (vsi->type == ICE_VSI_CTRL) { in ice_ena_vsi()
2822 * ice_dis_vsi - pause a VSI
2828 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_dis_vsi()
2831 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_dis_vsi()
2833 if (vsi->type == ICE_VSI_PF && vsi->netdev) { in ice_dis_vsi()
2834 if (netif_running(vsi->netdev)) { in ice_dis_vsi()
2845 } else if (vsi->type == ICE_VSI_CTRL || in ice_dis_vsi()
2846 vsi->type == ICE_VSI_SWITCHDEV_CTRL) { in ice_dis_vsi()
2852 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
2853 * @vsi: the VSI being un-configured
2857 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq()
2858 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
2863 if (vsi->tx_rings) { in ice_vsi_dis_irq()
2865 if (vsi->tx_rings[i]) { in ice_vsi_dis_irq()
2868 reg = vsi->tx_rings[i]->reg_idx; in ice_vsi_dis_irq()
2876 if (vsi->rx_rings) { in ice_vsi_dis_irq()
2878 if (vsi->rx_rings[i]) { in ice_vsi_dis_irq()
2881 reg = vsi->rx_rings[i]->reg_idx; in ice_vsi_dis_irq()
2891 if (!vsi->q_vectors[i]) in ice_vsi_dis_irq()
2893 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); in ice_vsi_dis_irq()
2899 if (vsi->type == ICE_VSI_VF) in ice_vsi_dis_irq()
2903 synchronize_irq(vsi->q_vectors[i]->irq.virq); in ice_vsi_dis_irq()
2907 * __ice_queue_set_napi - Set the napi instance for the queue
2910 * @type: queue type as RX or TX
2929 * ice_queue_set_napi - Set the napi instance for the queue
2932 * @type: queue type as RX or TX
2942 struct ice_pf *pf = vsi->back; in ice_queue_set_napi()
2944 if (!vsi->netdev) in ice_queue_set_napi()
2947 if (current_work() == &pf->serv_task || in ice_queue_set_napi()
2948 test_bit(ICE_PREPARED_FOR_RESET, pf->state) || in ice_queue_set_napi()
2949 test_bit(ICE_DOWN, pf->state) || in ice_queue_set_napi()
2950 test_bit(ICE_SUSPENDED, pf->state)) in ice_queue_set_napi()
2951 __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, in ice_queue_set_napi()
2954 __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, in ice_queue_set_napi()
2959 * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
2971 ice_for_each_rx_ring(rx_ring, q_vector->rx) in __ice_q_vector_set_napi_queues()
2972 __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, in __ice_q_vector_set_napi_queues()
2973 NETDEV_QUEUE_TYPE_RX, &q_vector->napi, in __ice_q_vector_set_napi_queues()
2976 ice_for_each_tx_ring(tx_ring, q_vector->tx) in __ice_q_vector_set_napi_queues()
2977 __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, in __ice_q_vector_set_napi_queues()
2978 NETDEV_QUEUE_TYPE_TX, &q_vector->napi, in __ice_q_vector_set_napi_queues()
2981 netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); in __ice_q_vector_set_napi_queues()
2985 * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
2995 ice_for_each_rx_ring(rx_ring, q_vector->rx) in ice_q_vector_set_napi_queues()
2996 ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, in ice_q_vector_set_napi_queues()
2997 NETDEV_QUEUE_TYPE_RX, &q_vector->napi); in ice_q_vector_set_napi_queues()
2999 ice_for_each_tx_ring(tx_ring, q_vector->tx) in ice_q_vector_set_napi_queues()
3000 ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, in ice_q_vector_set_napi_queues()
3001 NETDEV_QUEUE_TYPE_TX, &q_vector->napi); in ice_q_vector_set_napi_queues()
3003 netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); in ice_q_vector_set_napi_queues()
3016 if (!vsi->netdev) in ice_vsi_set_napi_queues()
3020 ice_q_vector_set_napi_queues(vsi->q_vectors[i]); in ice_vsi_set_napi_queues()
3024 * ice_vsi_release - Delete a VSI and free its resources
3033 if (!vsi->back) in ice_vsi_release()
3034 return -ENODEV; in ice_vsi_release()
3035 pf = vsi->back; in ice_vsi_release()
3037 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) in ice_vsi_release()
3047 if (!ice_is_reset_in_progress(pf->state)) in ice_vsi_release()
3054 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
3067 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_rebuild_get_coalesce()
3069 coalesce[i].itr_tx = q_vector->tx.itr_settings; in ice_vsi_rebuild_get_coalesce()
3070 coalesce[i].itr_rx = q_vector->rx.itr_settings; in ice_vsi_rebuild_get_coalesce()
3071 coalesce[i].intrl = q_vector->intrl; in ice_vsi_rebuild_get_coalesce()
3073 if (i < vsi->num_txq) in ice_vsi_rebuild_get_coalesce()
3075 if (i < vsi->num_rxq) in ice_vsi_rebuild_get_coalesce()
3079 return vsi->num_q_vectors; in ice_vsi_rebuild_get_coalesce()
3083 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
3104 * the number of Tx or Rx rings changes (the first for loop) in ice_vsi_rebuild_set_coalesce()
3108 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
3110 * both Tx and Rx: in ice_vsi_rebuild_set_coalesce()
3112 * and the loop variable is less than the number of rings in ice_vsi_rebuild_set_coalesce()
3116 * rings is less than are allocated (this means the number of in ice_vsi_rebuild_set_coalesce()
3117 * rings increased from previously), then write out the in ice_vsi_rebuild_set_coalesce()
3124 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { in ice_vsi_rebuild_set_coalesce()
3125 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3126 rc->itr_settings = coalesce[i].itr_rx; in ice_vsi_rebuild_set_coalesce()
3127 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3128 } else if (i < vsi->alloc_rxq) { in ice_vsi_rebuild_set_coalesce()
3129 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3130 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
3131 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3134 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { in ice_vsi_rebuild_set_coalesce()
3135 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3136 rc->itr_settings = coalesce[i].itr_tx; in ice_vsi_rebuild_set_coalesce()
3137 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3138 } else if (i < vsi->alloc_txq) { in ice_vsi_rebuild_set_coalesce()
3139 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3140 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
3141 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3144 vsi->q_vectors[i]->intrl = coalesce[i].intrl; in ice_vsi_rebuild_set_coalesce()
3145 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
3151 for (; i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
3153 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3154 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
3155 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3158 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3159 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
3160 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3162 vsi->q_vectors[i]->intrl = coalesce[0].intrl; in ice_vsi_rebuild_set_coalesce()
3163 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
3168 * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones
3174 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
3175 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
3179 struct ice_pf *pf = vsi->back; in ice_vsi_realloc_stat_arrays()
3180 u16 prev_txq = vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
3181 u16 prev_rxq = vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
3184 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_realloc_stat_arrays()
3188 if (vsi_stat->tx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
3189 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
3190 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
3195 tx_ring_stats = vsi_stat->tx_ring_stats; in ice_vsi_realloc_stat_arrays()
3196 vsi_stat->tx_ring_stats = in ice_vsi_realloc_stat_arrays()
3197 krealloc_array(vsi_stat->tx_ring_stats, req_txq, in ice_vsi_realloc_stat_arrays()
3198 sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_realloc_stat_arrays()
3200 if (!vsi_stat->tx_ring_stats) { in ice_vsi_realloc_stat_arrays()
3201 vsi_stat->tx_ring_stats = tx_ring_stats; in ice_vsi_realloc_stat_arrays()
3202 return -ENOMEM; in ice_vsi_realloc_stat_arrays()
3207 if (vsi_stat->rx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
3208 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
3209 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
3214 rx_ring_stats = vsi_stat->rx_ring_stats; in ice_vsi_realloc_stat_arrays()
3215 vsi_stat->rx_ring_stats = in ice_vsi_realloc_stat_arrays()
3216 krealloc_array(vsi_stat->rx_ring_stats, req_rxq, in ice_vsi_realloc_stat_arrays()
3217 sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_realloc_stat_arrays()
3219 if (!vsi_stat->rx_ring_stats) { in ice_vsi_realloc_stat_arrays()
3220 vsi_stat->rx_ring_stats = rx_ring_stats; in ice_vsi_realloc_stat_arrays()
3221 return -ENOMEM; in ice_vsi_realloc_stat_arrays()
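/* Editor's sketch (not part of ice_lib.c): the grow-with-rollback idiom used
 * above, modeled in user space with realloc(). krealloc_array() behaves the
 * same way on failure: the old block stays valid, so the saved pointer is
 * put back and the caller returns -ENOMEM with the previous array intact.
 * Names are hypothetical.
 */
#include <errno.h>
#include <stdlib.h>

struct ring_stats { unsigned long long pkts, bytes; };

struct vsi_stats_model {
	struct ring_stats **tx_ring_stats;
};

static int grow_tx_stats(struct vsi_stats_model *s, size_t req_txq)
{
	struct ring_stats **prev = s->tx_ring_stats;	/* save for rollback */

	s->tx_ring_stats = realloc(s->tx_ring_stats,
				   req_txq * sizeof(*s->tx_ring_stats));
	if (!s->tx_ring_stats) {
		s->tx_ring_stats = prev;	/* old array is still valid */
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct vsi_stats_model s = { 0 };

	return grow_tx_stats(&s, 8) ? 1 : 0;
}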
3228 * ice_vsi_rebuild - Rebuild VSI after reset
3246 return -EINVAL; in ice_vsi_rebuild()
3251 pf = vsi->back; in ice_vsi_rebuild()
3252 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_rebuild()
3253 return -EINVAL; in ice_vsi_rebuild()
3255 coalesce = kcalloc(vsi->num_q_vectors, in ice_vsi_rebuild()
3258 return -ENOMEM; in ice_vsi_rebuild()
3274 ret = -EIO; in ice_vsi_rebuild()
3295 * ice_is_reset_in_progress - check for a reset in progress
3307 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3316 * Returns 0 on success, -EBUSY if the reset is not finished within the
3317 * timeout, and -ERESTARTSYS if the thread was interrupted.
3323 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, in ice_wait_for_reset()
3324 !ice_is_reset_in_progress(pf->state), in ice_wait_for_reset()
3329 return -EBUSY; in ice_wait_for_reset()
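/* Editor's sketch (not part of ice_lib.c): how a
 * wait_event_interruptible_timeout()-style result maps onto the
 * 0 / -EBUSY / -ERESTARTSYS contract documented above: a positive value
 * means the condition became true in time, zero means the timeout expired,
 * and a negative value (interruption) is passed through unchanged.
 */
#include <errno.h>

static int map_wait_result(long ret)
{
	if (ret < 0)
		return (int)ret;	/* interrupted, e.g. -ERESTARTSYS */
	if (ret == 0)
		return -EBUSY;		/* timed out while still rebuilding */
	return 0;			/* reset finished within the timeout */
}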
3335 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3341 vsi->info.mapping_flags = ctx->info.mapping_flags; in ice_vsi_update_q_map()
3342 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, in ice_vsi_update_q_map()
3343 sizeof(vsi->info.q_mapping)); in ice_vsi_update_q_map()
3344 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, in ice_vsi_update_q_map()
3345 sizeof(vsi->info.tc_mapping)); in ice_vsi_update_q_map()
3349 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3355 struct net_device *netdev = vsi->netdev; in ice_vsi_cfg_netdev_tc()
3356 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
3357 int numtc = vsi->tc_cfg.numtc; in ice_vsi_cfg_netdev_tc()
3366 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_cfg_netdev_tc()
3374 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) in ice_vsi_cfg_netdev_tc()
3375 numtc = vsi->all_numtc; in ice_vsi_cfg_netdev_tc()
3380 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_vsi_cfg_netdev_tc()
3383 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_cfg_netdev_tc()
3385 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_cfg_netdev_tc()
3386 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_cfg_netdev_tc()
3387 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_cfg_netdev_tc()
3390 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_cfg_netdev_tc()
3392 if (!vsi->mqprio_qopt.qopt.count[i]) in ice_vsi_cfg_netdev_tc()
3395 vsi->mqprio_qopt.qopt.count[i], in ice_vsi_cfg_netdev_tc()
3396 vsi->mqprio_qopt.qopt.offset[i]); in ice_vsi_cfg_netdev_tc()
3399 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_netdev_tc()
3403 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; in ice_vsi_cfg_netdev_tc()
3406 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; in ice_vsi_cfg_netdev_tc()
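/* Editor's sketch (not part of ice_lib.c): the priority-to-TC mapping at the
 * end of ice_vsi_cfg_netdev_tc() above, as a runnable model. Each of the 8
 * 802.1p user priorities is looked up in the DCB ETS priority table and
 * translated to the netdev TC assigned to that hardware TC. The table
 * contents below are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	/* DCB: which hardware TC each user priority belongs to */
	static const int prio_table[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	/* driver: which netdev TC each hardware TC was given */
	static const int netdev_tc[4] = { 0, 1, 2, 3 };
	int up;

	for (up = 0; up < 8; up++)
		printf("prio %d -> netdev tc %d\n",
		       up, netdev_tc[prio_table[up]]);
	return 0;
}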
3412 * ice_vsi_setup_q_map_mqprio - Prepares mqprio-based tc_config in ice_vsi_setup_q_map_mqprio()
3424 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; in ice_vsi_setup_q_map_mqprio()
3425 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; in ice_vsi_setup_q_map_mqprio()
3430 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; in ice_vsi_setup_q_map_mqprio()
3437 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map_mqprio()
3439 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map_mqprio()
3440 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map_mqprio()
3441 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map_mqprio()
3442 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map_mqprio()
3443 ctxt->info.tc_mapping[i] = 0; in ice_vsi_setup_q_map_mqprio()
3447 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3448 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3449 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3450 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map_mqprio()
3451 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; in ice_vsi_setup_q_map_mqprio()
3452 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; in ice_vsi_setup_q_map_mqprio()
3453 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map_mqprio()
3456 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map_mqprio()
3458 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_setup_q_map_mqprio()
3460 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3461 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3462 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3467 if (new_txq > vsi->alloc_txq) { in ice_vsi_setup_q_map_mqprio()
3468 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3469 new_txq, vsi->alloc_txq); in ice_vsi_setup_q_map_mqprio()
3470 return -EINVAL; in ice_vsi_setup_q_map_mqprio()
3474 if (new_rxq > vsi->alloc_rxq) { in ice_vsi_setup_q_map_mqprio()
3475 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3476 new_rxq, vsi->alloc_rxq); in ice_vsi_setup_q_map_mqprio()
3477 return -EINVAL; in ice_vsi_setup_q_map_mqprio()
3480 /* Set actual Tx/Rx queue pairs */ in ice_vsi_setup_q_map_mqprio()
3481 vsi->num_txq = new_txq; in ice_vsi_setup_q_map_mqprio()
3482 vsi->num_rxq = new_rxq; in ice_vsi_setup_q_map_mqprio()
3485 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in ice_vsi_setup_q_map_mqprio()
3486 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map_mqprio()
3487 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); in ice_vsi_setup_q_map_mqprio()
3492 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { in ice_vsi_setup_q_map_mqprio()
3493 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3494 vsi->next_base_q = tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3496 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); in ice_vsi_setup_q_map_mqprio()
3497 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); in ice_vsi_setup_q_map_mqprio()
3498 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", in ice_vsi_setup_q_map_mqprio()
3499 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); in ice_vsi_setup_q_map_mqprio()
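/* Editor's sketch (not part of ice_lib.c): the queue-count sanity check
 * above in isolation. The largest offset+count over the enabled TCs is the
 * number of queues the mqprio layout needs; anything beyond what was
 * allocated is rejected, mirroring the alloc_txq/alloc_rxq checks. Names
 * are hypothetical.
 */
#include <errno.h>

static int check_mqprio_fit(const unsigned int *offset,
			    const unsigned int *count,
			    int ntc, unsigned int alloc_q)
{
	unsigned int need = 0;
	int i;

	for (i = 0; i < ntc; i++)
		if (offset[i] + count[i] > need)
			need = offset[i] + count[i];

	return need > alloc_q ? -EINVAL : 0;	/* would exceed allocation */
}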
3505 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3514 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_tc()
3522 if (vsi->tc_cfg.ena_tc == ena_tc && in ice_vsi_cfg_tc()
3523 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in ice_vsi_cfg_tc()
3531 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc()
3535 if (vsi->type == ICE_VSI_CHNL && in ice_vsi_cfg_tc()
3536 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_tc()
3537 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc()
3540 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); in ice_vsi_cfg_tc()
3541 vsi->tc_cfg.ena_tc = ena_tc; in ice_vsi_cfg_tc()
3542 vsi->tc_cfg.numtc = num_tc; in ice_vsi_cfg_tc()
3546 return -ENOMEM; in ice_vsi_cfg_tc()
3548 ctx->vf_num = 0; in ice_vsi_cfg_tc()
3549 ctx->info = vsi->info; in ice_vsi_cfg_tc()
3551 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3552 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_tc()
3558 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); in ice_vsi_cfg_tc()
3563 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); in ice_vsi_cfg_tc()
3564 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); in ice_vsi_cfg_tc()
3570 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3571 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_tc()
3572 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); in ice_vsi_cfg_tc()
3574 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, in ice_vsi_cfg_tc()
3575 vsi->tc_cfg.ena_tc, max_txqs); in ice_vsi_cfg_tc()
3579 vsi->vsi_num, ret); in ice_vsi_cfg_tc()
3583 vsi->info.valid_sections = 0; in ice_vsi_cfg_tc()
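/* Editor's sketch (not part of ice_lib.c): the snapshot-and-rollback idiom
 * visible in ice_vsi_cfg_tc() above: copy the old TC configuration aside,
 * apply the new one, and restore the copy if programming the device fails,
 * so the cached state never diverges from what the hardware accepted.
 * Hypothetical names.
 */
struct tc_cfg_model { unsigned int ena_tc, numtc; };

static int apply_tc_cfg(struct tc_cfg_model *cur, struct tc_cfg_model next,
			int (*program_hw)(const struct tc_cfg_model *))
{
	struct tc_cfg_model old = *cur;	/* snapshot for rollback */
	int err;

	*cur = next;
	err = program_hw(cur);
	if (err)
		*cur = old;		/* device rejected it: roll back */
	return err;
}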
3592 * ice_update_ring_stats - Update ring statistics
3601 stats->bytes += bytes; in ice_update_ring_stats()
3602 stats->pkts += pkts; in ice_update_ring_stats()
3606 * ice_update_tx_ring_stats - Update Tx ring specific counters
3613 u64_stats_update_begin(&tx_ring->ring_stats->syncp); in ice_update_tx_ring_stats()
3614 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes); in ice_update_tx_ring_stats()
3615 u64_stats_update_end(&tx_ring->ring_stats->syncp); in ice_update_tx_ring_stats()
3619 * ice_update_rx_ring_stats - Update Rx ring specific counters
3626 u64_stats_update_begin(&rx_ring->ring_stats->syncp); in ice_update_rx_ring_stats()
3627 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes); in ice_update_rx_ring_stats()
3628 u64_stats_update_end(&rx_ring->ring_stats->syncp); in ice_update_rx_ring_stats()
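/* Editor's sketch (not part of ice_lib.c): the update path above in
 * miniature. The u64_stats begin/end pair exists so that readers on 32-bit
 * machines can detect and retry a torn 64-bit update; the arithmetic itself
 * is just two additions. Modeled with plain fields and comment markers.
 */
struct ring_stats_model {
	unsigned long long pkts, bytes;
};

static void update_ring_stats_model(struct ring_stats_model *s,
				    unsigned long long pkts,
				    unsigned long long bytes)
{
	/* driver: u64_stats_update_begin(&ring_stats->syncp); */
	s->pkts += pkts;
	s->bytes += bytes;
	/* driver: u64_stats_update_end(&ring_stats->syncp); */
}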
3632 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3646 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3654 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); in ice_is_vsi_dflt_vsi()
3658 * ice_set_dflt_vsi - set the default forwarding VSI
3673 return -EINVAL; in ice_set_dflt_vsi()
3675 dev = ice_pf_to_dev(vsi->back); in ice_set_dflt_vsi()
3677 if (ice_lag_is_switchdev_running(vsi->back)) { in ice_set_dflt_vsi()
3679 vsi->vsi_num); in ice_set_dflt_vsi()
3686 vsi->vsi_num); in ice_set_dflt_vsi()
3690 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); in ice_set_dflt_vsi()
3693 vsi->vsi_num, status); in ice_set_dflt_vsi()
3701 * ice_clear_dflt_vsi - clear the default forwarding VSI
3714 return -EINVAL; in ice_clear_dflt_vsi()
3716 dev = ice_pf_to_dev(vsi->back); in ice_clear_dflt_vsi()
3719 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) in ice_clear_dflt_vsi()
3720 return -ENODEV; in ice_clear_dflt_vsi()
3722 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, in ice_clear_dflt_vsi()
3726 vsi->vsi_num, status); in ice_clear_dflt_vsi()
3727 return -EIO; in ice_clear_dflt_vsi()
3734 * ice_get_link_speed_mbps - get link speed in Mbps
3743 link_speed = vsi->port_info->phy.link_info.link_speed; in ice_get_link_speed_mbps()
3745 return (int)ice_get_link_speed(fls(link_speed) - 1); in ice_get_link_speed_mbps()
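/* Editor's sketch (not part of ice_lib.c): why fls(link_speed) - 1 works
 * above. link_speed is a one-hot bitmask (one ICE_AQ_LINK_SPEED_* bit set),
 * so the index of the highest set bit selects the matching entry in a speed
 * table. Standalone model; the table contents here are made up.
 */
#include <stdio.h>

/* user-space stand-in for the kernel's fls(): 1-based highest set bit */
static int fls_model(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* hypothetical speed table indexed by bit position */
	static const int mbps[] = { 10, 100, 1000, 2500, 5000, 10000 };
	unsigned int link_speed = 1u << 5;	/* one-hot: "10G" bit set */

	printf("%d Mbps\n", mbps[fls_model(link_speed) - 1]);
	return 0;
}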
3749 * ice_get_link_speed_kbps - get link speed in Kbps
3764 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
3766 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
3769 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
3774 struct ice_pf *pf = vsi->back; in ice_set_min_bw_limit()
3780 if (!vsi->port_info) { in ice_set_min_bw_limit()
3782 vsi->idx, vsi->type); in ice_set_min_bw_limit()
3783 return -EINVAL; in ice_set_min_bw_limit()
3788 …dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed… in ice_set_min_bw_limit()
3789 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_min_bw_limit()
3791 return -EINVAL; in ice_set_min_bw_limit()
3796 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_min_bw_limit()
3799 dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", in ice_set_min_bw_limit()
3800 min_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_min_bw_limit()
3801 vsi->idx); in ice_set_min_bw_limit()
3805 dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", in ice_set_min_bw_limit()
3806 min_tx_rate, ice_vsi_type_str(vsi->type)); in ice_set_min_bw_limit()
3808 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_min_bw_limit()
3809 vsi->idx, 0, in ice_set_min_bw_limit()
3812 dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", in ice_set_min_bw_limit()
3813 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3817 dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", in ice_set_min_bw_limit()
3818 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3825 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
3827 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
3830 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
3835 struct ice_pf *pf = vsi->back; in ice_set_max_bw_limit()
3841 if (!vsi->port_info) { in ice_set_max_bw_limit()
3843 vsi->idx, vsi->type); in ice_set_max_bw_limit()
3844 return -EINVAL; in ice_set_max_bw_limit()
3849 …dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed… in ice_set_max_bw_limit()
3850 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_max_bw_limit()
3852 return -EINVAL; in ice_set_max_bw_limit()
3857 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_max_bw_limit()
3860 dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", in ice_set_max_bw_limit()
3861 max_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_max_bw_limit()
3862 vsi->idx); in ice_set_max_bw_limit()
3866 dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", in ice_set_max_bw_limit()
3867 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3869 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_max_bw_limit()
3870 vsi->idx, 0, in ice_set_max_bw_limit()
3873 dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", in ice_set_max_bw_limit()
3874 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3878 dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", in ice_set_max_bw_limit()
3879 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
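/* Editor's sketch (not part of ice_lib.c): the validation shared by the
 * min/max BW helpers above. A rate of zero means "clear the limit"; a
 * nonzero rate in Kbps must not exceed the current link speed (reported in
 * Mbps, hence the * 1000). Hypothetical names.
 */
#include <errno.h>
#include <stdbool.h>

static int validate_tx_rate(unsigned long long rate_kbps,
			    unsigned int link_mbps, bool *clear_limit)
{
	*clear_limit = (rate_kbps == 0);	/* 0 removes the BW limit */
	if (!*clear_limit && rate_kbps > (unsigned long long)link_mbps * 1000)
		return -EINVAL;	/* faster than the link can carry */
	return 0;
}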
3886 * ice_set_link - turn on/off physical link
3892 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_link()
3893 struct ice_port_info *pi = vsi->port_info; in ice_set_link()
3894 struct ice_hw *hw = pi->hw; in ice_set_link()
3897 if (vsi->type != ICE_VSI_PF) in ice_set_link()
3898 return -EINVAL; in ice_set_link()
3907 if (status == -EIO) { in ice_set_link()
3908 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) in ice_set_link()
3911 ice_aq_str(hw->adminq.sq_last_status)); in ice_set_link()
3915 ice_aq_str(hw->adminq.sq_last_status)); in ice_set_link()
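/* Editor's sketch (not part of ice_lib.c): the error policy visible above.
 * An -EIO whose admin-queue status is EMODE means link control is owned by
 * another entity, so it is logged but deliberately not treated as a
 * failure; any other AQ error is. The enum below is a hypothetical
 * stand-in for the real AQ return codes.
 */
#include <errno.h>

enum aq_rc_model { AQ_RC_OK, AQ_RC_EMODE, AQ_RC_OTHER };

static int map_set_link_err(int err, enum aq_rc_model last_status)
{
	if (err == -EIO && last_status == AQ_RC_EMODE)
		return 0;	/* link owned elsewhere: not an error */
	return err;
}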
3923 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
3946 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3947 if (err && err != -EEXIST) in ice_vsi_add_vlan_zero()
3951 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_add_vlan_zero()
3955 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3956 if (err && err != -EEXIST) in ice_vsi_add_vlan_zero()
3963 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
3976 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3977 if (err && err != -EEXIST) in ice_vsi_del_vlan_zero()
3981 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_del_vlan_zero()
3985 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3986 if (err && err != -EEXIST) in ice_vsi_del_vlan_zero()
3992 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vsi_del_vlan_zero()
3997 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
4008 if (vsi->type == ICE_VSI_VF) { in ice_vsi_num_zero_vlans()
4009 if (WARN_ON(!vsi->vf)) in ice_vsi_num_zero_vlans()
4012 if (ice_vf_is_port_vlan_ena(vsi->vf)) in ice_vsi_num_zero_vlans()
4016 if (ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_num_zero_vlans()
4023 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
4024 * @vsi: VSI used to determine if any non-zero VLANs have been added
4028 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi)); in ice_vsi_has_non_zero_vlans()
4032 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
4033 * @vsi: VSI used to get the number of non-zero VLANs added
4037 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi)); in ice_vsi_num_non_zero_vlans()
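/* Editor's sketch (not part of ice_lib.c): the VLAN-0 accounting above as a
 * pure function. With double VLAN mode enabled two implicit VLAN-0 filters
 * exist (outer + inner), with single VLAN mode one, and none while a VF
 * port VLAN is active; "real" VLANs are what remains after subtracting
 * these. Hypothetical names.
 */
#include <stdbool.h>

static unsigned int num_zero_vlans_model(bool port_vlan_ena, bool dvm_ena)
{
	if (port_vlan_ena)
		return 0;		/* port VLAN: no VLAN-0 filters */
	return dvm_ena ? 2 : 1;		/* outer+inner vs. single filter */
}

static unsigned int num_non_zero_vlans_model(unsigned int num_vlan,
					     bool port_vlan_ena, bool dvm_ena)
{
	return num_vlan - num_zero_vlans_model(port_vlan_ena, dvm_ena);
}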
4052 return test_bit(f, pf->features); in ice_is_feature_supported()
4065 set_bit(f, pf->features); in ice_set_feature_support()
4078 clear_bit(f, pf->features); in ice_clear_feature_support()
4089 switch (pf->hw.device_id) { in ice_init_feature_support()
4097 if (ice_is_phy_rclk_in_netlist(&pf->hw)) in ice_init_feature_support()
4099 /* If we don't own the timer, don't enable other caps */ in ice_init_feature_support()
4102 if (ice_is_cgu_in_netlist(&pf->hw)) in ice_init_feature_support()
4104 if (ice_is_clock_mux_in_netlist(&pf->hw)) in ice_init_feature_support()
4106 if (ice_gnss_is_gps_present(&pf->hw)) in ice_init_feature_support()
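/* Editor's sketch (not part of ice_lib.c): the feature helpers above are
 * thin wrappers over a bitmap guarded by a range check, modeled here with a
 * plain unsigned long and a hypothetical feature enum.
 */
#include <stdbool.h>

enum feature_model { FEAT_RCLK, FEAT_CGU, FEAT_GNSS, FEAT_CNT };

static bool feature_supported(const unsigned long *features,
			      enum feature_model f)
{
	return f < FEAT_CNT && (*features & (1UL << f));
}

static void set_feature_support(unsigned long *features, enum feature_model f)
{
	if (f < FEAT_CNT)
		*features |= 1UL << f;
}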
4115 * ice_vsi_update_security - update security block in VSI
4124 ctx.info = vsi->info; in ice_vsi_update_security()
4128 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_security()
4129 return -ENODEV; in ice_vsi_update_security()
4131 vsi->info = ctx.info; in ice_vsi_update_security()
4136 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
4141 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | in ice_vsi_ctx_set_antispoof()
4147 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
4152 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF & in ice_vsi_ctx_clear_antispoof()
4158 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
4163 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; in ice_vsi_ctx_set_allow_override()
4167 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
4172 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; in ice_vsi_ctx_clear_allow_override()
4176 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
4184 .info = vsi->info, in ice_vsi_update_local_lb()
4193 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_local_lb()
4194 return -ENODEV; in ice_vsi_update_local_lb()
4196 vsi->info = ctx.info; in ice_vsi_update_local_lb()
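/* Editor's sketch (not part of ice_lib.c): the context read-modify-write
 * idiom that ice_vsi_update_security() and ice_vsi_update_local_lb() follow
 * above: build the update on a copy of the cached info, push it to the
 * device, and commit the copy back into the cache only once the device has
 * accepted it. Hypothetical names.
 */
struct vsi_info_model { unsigned int sw_flags; };

static int update_vsi_flag(struct vsi_info_model *cached, unsigned int flag,
			   int (*push_to_hw)(const struct vsi_info_model *))
{
	struct vsi_info_model ctx = *cached;	/* scratch copy */
	int err;

	ctx.sw_flags |= flag;
	err = push_to_hw(&ctx);
	if (err)
		return err;	/* cache untouched on failure */

	*cached = ctx;		/* device accepted: commit */
	return 0;
}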