/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_lib.c
    22  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in ixgbe_cache_ring_dcb_sriov() local
    36  reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_cache_ring_dcb_sriov()
    39  if ((reg_idx & ~vmdq->mask) >= tcs) {  in ixgbe_cache_ring_dcb_sriov()
    41  reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);  in ixgbe_cache_ring_dcb_sriov()
    47  reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_cache_ring_dcb_sriov()
    50  if ((reg_idx & ~vmdq->mask) >= tcs)  in ixgbe_cache_ring_dcb_sriov()
    51  reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);  in ixgbe_cache_ring_dcb_sriov()
    66  u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_cache_ring_dcb_sriov()
    69  reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;  in ixgbe_cache_ring_dcb_sriov()
    71  reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;  in ixgbe_cache_ring_dcb_sriov()
    [all …]
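
A note on the recurring idiom above: __ALIGN_MASK(x, mask) expands to (((x) + (mask)) & ~(mask)) (see include/linux/align.h), so __ALIGN_MASK(1, ~vmdq->mask) rounds 1 up to the pool stride, i.e. the number of hardware queues per VMDq pool. A minimal userspace sketch, assuming 82599-style mask values (the specific masks are an assumption for illustration):

    #include <stdio.h>

    /* Mirrors __ALIGN_KERNEL_MASK() from include/linux/align.h. */
    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
        /* Assumed 82599 VMDq masks: 0x78 -> 8, 0x7C -> 4, 0x7E -> 2 queues/pool. */
        unsigned int masks[] = { 0x78, 0x7C, 0x7E };

        for (int i = 0; i < 3; i++)
            printf("mask 0x%02X -> %u queues per pool\n",
                   masks[i], __ALIGN_MASK(1, ~masks[i]));
        return 0;
    }

The same expression reappears below as q_per_pool in ixgbe_sriov.c, ixgbe_main.c, and the wangxun libwx code.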
|
ixgbe_common.h
    47  int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
    66  int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
    67  int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
    68  int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
|
ixgbe_sriov.c
    702  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in ixgbe_vf_reset_event() local
    704  u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_vf_reset_event()
    813  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in ixgbe_write_qde() local
    814  u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_write_qde()
    883  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in ixgbe_vf_reset_msg() local
    889  u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_vf_reset_msg()
    1066  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in ixgbe_get_vf_queues() local
    1085  msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_get_vf_queues()
    1086  msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_get_vf_queues()
    1700  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in ixgbe_set_vf_rate_limit() local
    [all …]
|
ixgbe_common.c
    1839  int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,  in ixgbe_set_rar_generic() argument
    1852  hw->mac.ops.set_vmdq(hw, index, vmdq);  in ixgbe_set_rar_generic()
    2962  int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)  in ixgbe_clear_vmdq_generic() argument
    2982  if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {  in ixgbe_clear_vmdq_generic()
    2991  } else if (vmdq < 32) {  in ixgbe_clear_vmdq_generic()
    2992  mpsar_lo &= ~BIT(vmdq);  in ixgbe_clear_vmdq_generic()
    2995  mpsar_hi &= ~BIT(vmdq - 32);  in ixgbe_clear_vmdq_generic()
    3013  int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)  in ixgbe_set_vmdq_generic() argument
    3024  if (vmdq < 32) {  in ixgbe_set_vmdq_generic()
    3026  mpsar |= BIT(vmdq);  in ixgbe_set_vmdq_generic()
    [all …]
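
The set/clear pair above maintains a 64-pool bitmap per receive-address (RAR) entry, split across a low/high register pair: pools 0..31 in the low word, pools 32..63 in the high word. A standalone sketch of just that bit selection, with a toy struct standing in for the real MPSAR register reads and writes:

    #include <stdio.h>

    #define BIT(n) (1U << (n))  /* kernel's BIT() uses 1UL; 1U suffices here */

    /* Toy stand-in for one RAR entry's MPSAR low/high register pair. */
    struct mpsar_pair {
        unsigned int lo;    /* pools  0..31 */
        unsigned int hi;    /* pools 32..63 */
    };

    /* Mirrors the bit selection in ixgbe_set_vmdq_generic(). */
    static void set_pool(struct mpsar_pair *m, unsigned int vmdq)
    {
        if (vmdq < 32)
            m->lo |= BIT(vmdq);
        else
            m->hi |= BIT(vmdq - 32);
    }

    /* Mirrors the clearing path in ixgbe_clear_vmdq_generic(). */
    static void clear_pool(struct mpsar_pair *m, unsigned int vmdq)
    {
        if (vmdq < 32)
            m->lo &= ~BIT(vmdq);
        else
            m->hi &= ~BIT(vmdq - 32);
    }

    int main(void)
    {
        struct mpsar_pair m = { 0, 0 };

        set_pool(&m, 5);
        set_pool(&m, 40);
        printf("lo=0x%08X hi=0x%08X\n", m.lo, m.hi); /* lo=0x00000020 hi=0x00000100 */
        clear_pool(&m, 5);
        printf("lo=0x%08X hi=0x%08X\n", m.lo, m.hi); /* lo=0x00000000 hi=0x00000100 */
        return 0;
    }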
|
ixgbe_82598.c
    785  static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)  in ixgbe_set_vmdq_82598() argument
    798  rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq);  in ixgbe_set_vmdq_82598()
    809  static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)  in ixgbe_clear_vmdq_82598() argument
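
82598 has no MPSAR bitmap; ixgbe_set_vmdq_82598() instead packs the single pool index into the VIND field of the RAH register via FIELD_PREP(). A sketch using a simplified FIELD_PREP() (the real macro in <linux/bitfield.h> also validates the fit at compile time) and assuming the VIND field sits at bits 21:18, per my reading of ixgbe_type.h:

    #include <stdio.h>

    /* Simplified FIELD_PREP(): shift a value into a contiguous mask's position.
     * GCC/Clang only (__builtin_ctz); the real kernel macro adds checks. */
    #define FIELD_PREP(mask, val) \
        (((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

    #define IXGBE_RAH_VIND_MASK 0x003C0000  /* assumed: VIND at bits 21:18 */
    #define IXGBE_RAH_AV        0x80000000  /* address-valid bit */

    int main(void)
    {
        unsigned int rar_high = IXGBE_RAH_AV;  /* entry already marked valid */

        rar_high &= ~IXGBE_RAH_VIND_MASK;               /* clear old pool index */
        rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, 5); /* point RAR at pool 5 */
        printf("RAH = 0x%08X\n", rar_high);             /* 0x80140000 */
        return 0;
    }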
|
ixgbe_type_e610.h
    876  u8 vmdq; /* VMDQ supported */  member
|
ixgbe_e610.c
    566  caps->vmdq = (number == 1);  in ixgbe_parse_e610_caps()
|
ixgbe_main.c
    8219  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in ixgbe_vf_tx_pending() local
    8220  u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);  in ixgbe_vf_tx_pending()
    10072  struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  in handle_redirect_action() local
    10082  *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);  in handle_redirect_action()
|
/linux/drivers/net/ethernet/wangxun/libwx/
wx_sriov.c
    280  struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];  in wx_write_qde() local
    281  u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);  in wx_write_qde()
    341  struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];  in wx_get_vf_queues() local
    344  msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);  in wx_get_vf_queues()
    345  msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);  in wx_get_vf_queues()
|
wx_lib.c
    2005  struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];  in wx_cache_ring_vmdq() local
    2016  reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);  in wx_cache_ring_vmdq()
    2019  if ((reg_idx & ~vmdq->mask) >= rss->indices)  in wx_cache_ring_vmdq()
    2020  reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);  in wx_cache_ring_vmdq()
    2023  reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);  in wx_cache_ring_vmdq()
    2027  reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);  in wx_cache_ring_vmdq()
    2032  reg_idx = vmdq->offset;  in wx_cache_ring_vmdq()
    2037  reg_idx = vmdq->offset;  in wx_cache_ring_vmdq()
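
wx_cache_ring_vmdq() maps logical rings onto hardware register indices: whenever the within-pool index (reg_idx & ~vmdq->mask) runs past the queues actually in use, __ALIGN_MASK() jumps reg_idx ahead to the next pool boundary. A sketch of that skipping with hypothetical values (4 queues per pool, 2 RSS queues in use):

    #include <stdio.h>

    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
        /* Hypothetical layout: mask 0x7C -> 4 queues/pool, 2 RSS queues used. */
        unsigned int mask = 0x7C, rss_indices = 2, pool_offset = 0;
        unsigned int reg_idx = pool_offset * __ALIGN_MASK(1, ~mask);

        for (int ring = 0; ring < 6; ring++, reg_idx++) {
            /* Past the in-use queues of this pool: round up to the next pool. */
            if ((reg_idx & ~mask) >= rss_indices)
                reg_idx = __ALIGN_MASK(reg_idx, ~mask);
            printf("ring %d -> reg_idx %u\n", ring, reg_idx);
        }
        /* Prints 0, 1, 4, 5, 8, 9: two rings per four-queue pool. */
        return 0;
    }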
|
wx_hw.c
    845  static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)  in wx_clear_vmdq() argument
|
/linux/drivers/net/ethernet/intel/ixgbevf/
vf.c
    437  u32 vmdq)  in ixgbevf_set_rar_vf() argument
    473  u32 vmdq)  in ixgbevf_hv_set_rar_vf() argument
|
/linux/drivers/net/ethernet/intel/i40e/
i40e_type.h
    221  bool vmdq;  member
|
i40e_common.c
    2707  p->vmdq = true;  in i40e_parse_discover_capabilities()
|
i40e_main.c
    12761  if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {  in i40e_sw_init()
|