Lines Matching +full:tcs +full:- +full:wait

1 /* SPDX-License-Identifier: GPL-2.0 */
19 #include <linux/dma-mapping.h>
22 #include <linux/wait.h>
121 #define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
129 #define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
130 #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
131 #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
132 #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
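The four accessor macros above cast a ring's raw descriptor memory (R->desc) to the appropriate descriptor type and return the i-th entry. A minimal sketch of typical use, assuming the ice driver's struct ice_tx_ring layout (next_to_use, count, desc) from the upstream sources and the driver's own headers; the function name is a placeholder, not a driver routine:

/* Illustrative only: fetch the next free Tx descriptor and advance the
 * ring's software index, wrapping at the ring size.
 */
static struct ice_tx_desc *example_next_tx_desc(struct ice_tx_ring *tx_ring)
{
        u16 i = tx_ring->next_to_use;
        struct ice_tx_desc *tx_desc = ICE_TX_DESC(tx_ring, i);

        i++;
        tx_ring->next_to_use = (i == tx_ring->count) ? 0 : i;

        return tx_desc;
}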
158 for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
162 for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
165 for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)
168 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
172 for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
175 for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
178 for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
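The seven loop bodies above belong to the driver's ice_for_each_* iteration macros (ice_for_each_vsi, ice_for_each_txq, ice_for_each_xdp_txq, ice_for_each_rxq, ice_for_each_alloc_txq, ice_for_each_alloc_rxq, ice_for_each_q_vector); the macro names themselves sit on the preceding, non-matching lines. A short sketch of how one such iterator reads in practice, assuming the upstream vsi->tx_rings[] array and Tx ring field names; the function is illustrative:

/* Illustrative only: walk every in-use Tx ring of a VSI and count the
 * ones that still have unprocessed descriptors.
 */
static void example_count_busy_tx_rings(struct ice_vsi *vsi)
{
        unsigned int busy = 0;
        int i;

        ice_for_each_txq(vsi, i) {
                struct ice_tx_ring *ring = vsi->tx_rings[i];

                if (ring && ring->next_to_use != ring->next_to_clean)
                        busy++;
        }

        dev_dbg(ice_pf_to_dev(vsi->back), "%u of %u Tx rings busy\n",
                busy, vsi->num_txq);
}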
197 #define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
199 #define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
231 u32 q_teid; /* Tx-scheduler element identifier */
246 u8 numtc; /* Total number of enabled TCs */
252 struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
353 u16 idx; /* software index in pf->vsi[] */
370 #define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1)
401 u16 *txq_map; /* index in pf->avail_txqs */
402 u16 *rxq_map; /* index in pf->avail_rxqs */
433 * it can be used after tc-qdisc delete, to get back RSS setting as
438 * and inclusive of ADQ, vsi->mqprio_opt keeps track of queue
460 u16 v_idx; /* index in the vsi->q_vector array. */
559 /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
560 * number of MSIX vectors needed for all SR-IOV VFs from the number of
567 u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
600 /* spinlock to protect the AdminQ wait list */
617 u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
689 return !!qv->ch; /* Enable it to run with TC */ in ice_vector_ch_enabled()
693 * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
701 return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE; in ice_ptp_pf_handles_tx_interrupt()
705 * ice_irq_dynamic_ena - Enable default interrupt generation settings
714 u32 vector = (vsi && q_vector) ? q_vector->reg_idx : in ice_irq_dynamic_ena()
715 ((struct ice_pf *)hw->back)->oicr_irq.index; in ice_irq_dynamic_ena()
725 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_irq_dynamic_ena()
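ice_irq_dynamic_ena() picks the interrupt register index either from the queue vector or, when called without one, from the PF's other-interrupt-cause (OICR) vector, and skips re-arming while the VSI is marked down. A sketch of the common call pattern once polling for a vector has finished, assuming struct ice_hw lives at vsi->back->hw as in the upstream driver; the wrapper itself is a placeholder:

/* Illustrative only: re-arm a queue vector's interrupt after its poll
 * cycle completes.
 */
static void example_rearm_q_vector(struct ice_vsi *vsi,
                                   struct ice_q_vector *q_vector)
{
        ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
}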
731 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
738 return np->vsi->back; in ice_netdev_to_pf()
743 return !!READ_ONCE(vsi->xdp_prog); in ice_is_xdp_ena_vsi()
748 ring->flags |= ICE_TX_FLAGS_RING_XDP; in ice_set_ring_xdp()
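ice_is_xdp_ena_vsi() reports whether an XDP program is attached to the VSI, and ice_set_ring_xdp() tags a Tx ring as XDP-only. A sketch of how the two are typically combined when XDP Tx rings are configured, assuming the upstream vsi->xdp_rings[] array and the ice_for_each_xdp_txq() iterator; the function is illustrative:

/* Illustrative only: mark every XDP Tx ring of a VSI, but only when an
 * XDP program is actually attached.
 */
static void example_mark_xdp_rings(struct ice_vsi *vsi)
{
        int i;

        if (!ice_is_xdp_ena_vsi(vsi))
                return;

        ice_for_each_xdp_txq(vsi, i)
                ice_set_ring_xdp(vsi->xdp_rings[i]);
}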
752 * ice_xsk_pool - get XSK buffer pool bound to a ring
760 struct ice_vsi *vsi = ring->vsi; in ice_xsk_pool()
761 u16 qid = ring->q_index; in ice_xsk_pool()
763 if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) in ice_xsk_pool()
766 return xsk_get_pool_from_qid(vsi->netdev, qid); in ice_xsk_pool()
770 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
781 * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
787 ring = vsi->rx_rings[qid]->xdp_ring; in ice_tx_xsk_pool()
791 if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) { in ice_tx_xsk_pool()
792 ring->xsk_pool = NULL; in ice_tx_xsk_pool()
796 ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid); in ice_tx_xsk_pool()
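ice_xsk_pool() re-reads the AF_XDP buffer pool for an Rx ring on each call, while ice_tx_xsk_pool() refreshes the pool pointer cached on the paired XDP Tx ring. A sketch of the refresh step after a pool has been bound or unbound for queue qid, assuming the upstream vsi->rx_rings[] and vsi->netdev fields; the function is illustrative:

/* Illustrative only: update the cached Tx-side pool pointer and report
 * whether the queue now runs with an AF_XDP buffer pool.
 */
static void example_refresh_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
        ice_tx_xsk_pool(vsi, qid);

        if (ice_xsk_pool(vsi->rx_rings[qid]))
                netdev_dbg(vsi->netdev, "queue %u uses an AF_XDP pool\n", qid);
}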
800 * ice_get_main_vsi - Get the PF VSI
803 * returns pf->vsi[0], which by definition is the PF VSI
807 if (pf->vsi) in ice_get_main_vsi()
808 return pf->vsi[0]; in ice_get_main_vsi()
814 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
820 if (np->repr) in ice_get_netdev_priv_vsi()
821 return np->repr->src_vsi; in ice_get_netdev_priv_vsi()
823 return np->vsi; in ice_get_netdev_priv_vsi()
827 * ice_get_ctrl_vsi - Get the control VSI
832 /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */ in ice_get_ctrl_vsi()
833 if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI) in ice_get_ctrl_vsi()
836 return pf->vsi[pf->ctrl_vsi_idx]; in ice_get_ctrl_vsi()
840 * ice_find_vsi - Find the VSI from VSI ID
849 if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) in ice_find_vsi()
850 return pf->vsi[i]; in ice_find_vsi()
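ice_get_main_vsi() returns the PF VSI (pf->vsi[0]) and ice_find_vsi() scans pf->vsi[] for a matching hardware VSI number; both return NULL when the target does not exist. A small sketch combining them, assuming the vsi->vsi_num and vsi->idx fields shown elsewhere in this header; the function and its error handling are illustrative:

/* Illustrative only: look up a VSI by hardware number and report it
 * relative to the PF (main) VSI.
 */
static int example_report_vsi(struct ice_pf *pf, u16 vsi_num)
{
        struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
        struct ice_vsi *vsi = ice_find_vsi(pf, vsi_num);

        if (!main_vsi || !vsi)
                return -ENOENT;

        dev_info(ice_pf_to_dev(pf), "VSI %u found (PF VSI idx %u)\n",
                 vsi->vsi_num, main_vsi->idx);
        return 0;
}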
855 * ice_is_switchdev_running - check if switchdev is configured
863 return pf->eswitch.is_running; in ice_is_switchdev_running()
875 * ice_is_adq_active - any active ADQs
891 if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC && in ice_is_adq_active()
892 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_is_adq_active()
988 * ice_set_rdma_cap - enable RDMA support
993 if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { in ice_set_rdma_cap()
994 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_rdma_cap()
995 set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); in ice_set_rdma_cap()
1000 * ice_clear_rdma_cap - disable RDMA support
1008 clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); in ice_clear_rdma_cap()
1009 set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); in ice_clear_rdma_cap()
1010 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_clear_rdma_cap()
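ice_set_rdma_cap() only sets ICE_FLAG_RDMA_ENA (and requests plugging of the auxiliary device) when the firmware capability and reserved MSI-X vectors are both present; ice_clear_rdma_cap() reverses that and requests an unplug instead. A sketch of gating RDMA-dependent setup on the resulting flag; the function is illustrative, not part of the driver:

/* Illustrative only: attempt to enable RDMA and bail out quietly when
 * the capability is not available on this PF.
 */
static void example_configure_rdma(struct ice_pf *pf)
{
        ice_set_rdma_cap(pf);

        if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
                dev_dbg(ice_pf_to_dev(pf), "RDMA not available\n");
                return;
        }

        /* RDMA-dependent configuration would follow here */
}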