Searched refs:RING_F_VMDQ (Results 1 – 7 of 7) sorted by relevance
22   struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_cache_ring_dcb_sriov()
189  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_cache_ring_sriov()
329  u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; in ixgbe_set_dcb_sriov_queues()
348  vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_dcb_sriov_queues()
366  vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_dcb_sriov_queues()
369  adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; in ixgbe_set_dcb_sriov_queues()
370  adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; in ixgbe_set_dcb_sriov_queues()
502  u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; in ixgbe_set_sriov_queues()
518  vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_sriov_queues()
542  vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_sriov_queues()
[all …]
74    adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; in __ixgbe_enable_sriov()
266   adapter->ring_feature[RING_F_VMDQ].offset = 0; in ixgbe_disable_sriov()
702   struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_reset_event()
813   struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_write_qde()
883   struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_reset_msg()
1066  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_get_vf_queues()
1700  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_set_vf_rate_limit()
176  #define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
410  RING_F_VMDQ, /* SR-IOV uses the same ring feature */ enumerator
4014   else if (adapter->ring_feature[RING_F_VMDQ].mask == in ixgbe_setup_mtqc()
4388   else if (adapter->ring_feature[RING_F_VMDQ].mask == in ixgbe_setup_mrqc()
4669   switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_configure_virtualization()
6167   switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_setup_gpie()
6890   adapter->ring_feature[RING_F_VMDQ].limit = 1; in ixgbe_sw_init()
8219   struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_tx_pending()
10072  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in handle_redirect_action()
10505  if (!adapter->ring_feature[RING_F_VMDQ].offset) in ixgbe_reset_l2fw_offload()
10510  adapter->ring_feature[RING_F_VMDQ].limit = 1; in ixgbe_reset_l2fw_offload()
10786  adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; in ixgbe_fwd_add()
64   wx->ring_feature[RING_F_VMDQ].offset = 0; in wx_sriov_clear_data()
69   if (wx->ring_feature[RING_F_VMDQ].limit == 1) in wx_sriov_clear_data()
86   if (!wx->ring_feature[RING_F_VMDQ].limit) in __wx_enable_sriov()
87   wx->ring_feature[RING_F_VMDQ].limit = 1; in __wx_enable_sriov()
88   wx->ring_feature[RING_F_VMDQ].offset = num_vfs; in __wx_enable_sriov()
280  struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; in wx_write_qde()
341  struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; in wx_get_vf_queues()
1776  u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit; in wx_set_vmdq_queues()
1785  vmdq_i += wx->ring_feature[RING_F_VMDQ].offset; in wx_set_vmdq_queues()
1813  vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset; in wx_set_vmdq_queues()
1816  wx->ring_feature[RING_F_VMDQ].indices = vmdq_i; in wx_set_vmdq_queues()
1817  wx->ring_feature[RING_F_VMDQ].mask = vmdq_m; in wx_set_vmdq_queues()
2005  struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; in wx_cache_ring_vmdq()
554   #define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset)
1137  RING_F_VMDQ, enumerator