/linux/drivers/net/ethernet/netronome/nfp/nfd3/
  rings.c: references tx_ring in nfp_nfd3_xsk_tx_bufs_free(), nfp_nfd3_tx_ring_reset(), nfp_nfd3_tx_ring_free(), nfp_nfd3_tx_ring_alloc(), nfp_nfd3_tx_ring_bufs_free(), nfp_nfd3_tx_ring_bufs_alloc() and nfp_nfd3_print_tx_descs(). The nfp_nfd3_xsk_tx_bufs_free() hit drains the ring while rd_p != wr_p, masking the free-running read pointer into a slot index with D_IDX(tx_ring, tx_ring->rd_p) and advancing qcp_rd_p and rd_p for each freed buffer; completions are reported with xsk_tx_completed() when the ring has an XSK pool.
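The D_IDX()-masked drain loop in that rings.c hit is a generic power-of-two ring pattern: rd_p and wr_p run freely and are only masked down to a slot index at the point of use, so rd_p != wr_p doubles as the not-empty test. A minimal stand-alone sketch of the pattern, with an invented struct layout and helper names (ring_idx(), ring_drain()) rather than the NFP driver's own code:

#include <stdint.h>
#include <stdio.h>

#define RING_CNT 8              /* must be a power of two */

struct tx_ring {
        uint32_t rd_p;          /* free-running consumer counter */
        uint32_t wr_p;          /* free-running producer counter */
        uint32_t cnt;           /* number of slots (power of two) */
        int bufs[RING_CNT];     /* stand-in for per-slot buffer state */
};

/* Rough equivalent of the driver's D_IDX(): mask a free-running
 * pointer down to a slot index; valid because cnt is a power of two. */
static uint32_t ring_idx(const struct tx_ring *ring, uint32_t p)
{
        return p & (ring->cnt - 1);
}

/* Drain everything the producer has queued, one slot at a time. */
static void ring_drain(struct tx_ring *ring)
{
        while (ring->rd_p != ring->wr_p) {
                uint32_t idx = ring_idx(ring, ring->rd_p);

                printf("freeing slot %u (buf %d)\n",
                       (unsigned int)idx, ring->bufs[idx]);
                ring->bufs[idx] = 0;
                ring->rd_p++;
        }
}

int main(void)
{
        struct tx_ring ring = { .cnt = RING_CNT };

        for (int i = 0; i < 5; i++) {
                ring.bufs[ring_idx(&ring, ring.wr_p)] = 100 + i;
                ring.wr_p++;
        }
        ring_drain(&ring);
        return 0;
}

Keeping the counters free-running makes occupancy a plain subtraction (wr_p - rd_p) and the empty check a plain comparison; the cost is that the ring size has to stay a power of two for the masking to work.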
  xsk.c: references tx_ring in nfp_nfd3_xsk_tx_xdp(), nfp_nfd3_xsk_rx(), nfp_nfd3_xsk_complete() and nfp_nfd3_xsk_tx(); nfp_nfd3_xsk_tx_xdp() checks nfp_net_tx_space() before claiming the descriptor at D_IDX(tx_ring, tx_ring->wr_p) and then bumps wr_p and wr_ptr_add.
  dp.c: references tx_ring in nfp_nfd3_tx_ring_should_wake(), nfp_nfd3_tx_ring_should_stop(), nfp_nfd3_tx_ring_stop(), nfp_nfd3_tx(), nfp_nfd3_tx_complete(), nfp_nfd3_xdp_complete(), nfp_nfd3_tx_xdp_buf(), nfp_nfd3_rx() and nfp_nfd3_ctrl_tx_one(). The should_stop/should_wake helpers are thin wrappers around nfp_net_tx_full(): the queue is stopped with nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1) and woken again once !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4).
/linux/drivers/net/ethernet/netronome/nfp/nfdk/
  rings.c: references tx_ring in nfp_nfdk_tx_ring_reset(), nfp_nfdk_tx_ring_free(), nfp_nfdk_tx_ring_alloc(), nfp_nfdk_tx_ring_bufs_free(), nfp_nfdk_tx_ring_bufs_alloc() and nfp_nfdk_print_tx_descs(). Unlike the NFD3 reset loop, the NFDK one advances rd_p by n_descs = D_BLOCK_CPL(tx_ring->rd_p) per completed entry rather than by one.
/linux/drivers/net/ethernet/amazon/ena/
  ena_xdp.c: references tx_ring in validate_xdp_req_id(), ena_xdp_tx_map_frame(), ena_xdp_xmit_frame(), ena_xdp_xmit(), ena_clean_xdp_irq() and ena_xdp_io_poll(). ena_xdp_tx_map_frame() pushes up to tx_max_header_size bytes inline when the queue uses ENA_ADMIN_PLACEMENT_POLICY_DEV and maps the rest with dma_map_single().
  ena_netdev.c: references tx_ring in ena_tx_timeout(), ena_init_io_rings(), ena_setup_tx_resources(), ena_free_tx_resources(), ena_unmap_tx_buff(), ena_free_tx_bufs(), ena_free_all_tx_bufs(), validate_tx_req_id(), ena_clean_tx_irq(), ena_unmask_interrupt(), ena_update_ring_numa_node(), ena_io_poll(), ena_init_napi_in_range(), ena_create_io_tx_queue(), ena_check_and_linearize_skb(), ena_tx_map_skb(), ena_start_xmit(), ena_get_stats64(), check_missing_comp_in_tx_queue() and check_for_missing_completions(). XDP TX queues are carved out of the same array: rxr->xdp_ring points at &adapter->tx_ring[i + adapter->num_io_queues].
/linux/drivers/net/ethernet/netronome/nfp/
  nfp_net_dp.h: references tx_ring in nfp_net_tx_full(), nfp_net_tx_xmit_more_flush(), nfp_net_read_tx_cmpl(), nfp_net_tx_ring_reset(), nfp_net_tx_ring_alloc(), nfp_net_tx_ring_free(), nfp_net_tx_ring_bufs_alloc(), nfp_net_tx_ring_bufs_free() and nfp_net_debugfs_print_tx_descs(). nfp_net_tx_full() treats the ring as full when (wr_p - rd_p) >= (cnt - dcnt), i.e. when dcnt or fewer descriptors remain free, and nfp_net_tx_xmit_more_flush() pushes the accumulated wr_ptr_add to the queue controller and resets it to zero.
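Because nfp_net_tx_full() is the primitive the NFD3 stop/wake checks are built on, a tiny self-contained version of the same comparison may be useful. Only the formula comes from the fragment above; the struct name and the test values are invented for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct tx_ring {
        uint32_t wr_p;          /* free-running producer counter */
        uint32_t rd_p;          /* free-running consumer counter */
        uint32_t cnt;           /* total descriptors in the ring */
};

/* "Full" here means dcnt or fewer descriptors remain free.
 * wr_p - rd_p is the in-flight count; unsigned wrap-around keeps the
 * subtraction valid even after the 32-bit counters roll over. */
static bool tx_ring_full(const struct tx_ring *ring, uint32_t dcnt)
{
        return ring->wr_p - ring->rd_p >= ring->cnt - dcnt;
}

int main(void)
{
        struct tx_ring ring = { .cnt = 64 };

        ring.rd_p = UINT32_MAX - 2;     /* consumer close to wrapping */
        ring.wr_p = 5;                  /* producer already wrapped: 8 in flight */

        assert(!tx_ring_full(&ring, 8));    /* 56 free, well above 8 */
        assert(tx_ring_full(&ring, 56));    /* exactly 56 free: "full" for dcnt 56 */
        return 0;
}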
  nfp_net_debugfs.c: references tx_ring in __nfp_tx_q_show(), which picks either r_vec->xdp_ring or r_vec->tx_ring and reads the device-side read and write pointers with nfp_qcp_rd_ptr_read()/nfp_qcp_wr_ptr_read() for the dump.
/linux/drivers/net/ethernet/intel/ice/
  ice_txrx.c: references tx_ring in ice_prgm_fdir_fltr(), ice_clean_tx_ring(), ice_free_tx_ring(), ice_clean_tx_irq(), ice_setup_tx_ring(), __ice_update_sample(), ice_napi_poll(), __ice_maybe_stop_tx(), ice_maybe_stop_tx(), ice_tx_map(), ice_tx_csum(), ice_tx_prepare_vlan_flags(), ice_tstamp(), ice_xmit_frame_ring(), ice_start_xmit() and ice_clean_ctrl_tx_irq(). ice_prgm_fdir_fltr() programs a flow director filter through vsi->tx_rings[0], waiting up to ICE_FDIR_CLEAN_DELAY iterations until at least two descriptors are unused.
/linux/drivers/net/ethernet/intel/iavf/
  iavf_txrx.c: references tx_ring in iavf_clean_tx_ring(), iavf_free_tx_resources(), iavf_detect_recover_hung(), iavf_clean_tx_irq(), iavf_setup_tx_descriptors(), iavf_tx_prepare_vlan_flags(), iavf_tx_enable_csum(), iavf_create_tx_ctx(), __iavf_maybe_stop_tx(), iavf_tx_map(), iavf_xmit_frame_ring() and iavf_xmit_frame(). iavf_clean_tx_ring() unmaps and frees every tx_bi entry, then zeroes both the buffer-info array and the descriptor memory.
/linux/drivers/infiniband/hw/hfi1/
  ipoib_tx.c: references the per-queue tx_ring in hfi1_ipoib_used(), hfi1_ipoib_stop_txq(), hfi1_ipoib_wake_txq(), hfi1_ipoib_ring_hwat(), hfi1_ipoib_ring_lwat(), hfi1_ipoib_check_queue_depth(), hfi1_ipoib_check_queue_stopped(), hfi1_ipoib_drain_tx_ring(), hfi1_ipoib_poll_tx_ring(), hfi1_ipoib_send_dma_common(), hfi1_ipoib_send_dma_single(), hfi1_ipoib_send_dma_list(), hfi1_ipoib_txreq_init() and hfi1_ipoib_txreq_deinit(). Queue occupancy is sent_txreqs minus complete_txreqs; the high and low watermarks are derived from tx_ring.max_items (max_items - 1 and max_items >> 1 in the visible fragments), and an atomic stops counter decides when the netdev queue is actually stopped or woken.
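The hfi1 fragments describe a classic high/low watermark scheme built on two monotonically increasing counters. The sketch below keeps only that arithmetic: the driver's atomic stops/ring_full bookkeeping and the netif queue calls are replaced by a plain bool and printf(), so treat it as a simplified illustration of the idea, not the driver's logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-queue circular-buffer counters. */
struct txq_state {
        uint64_t sent;          /* requests handed to the hardware */
        uint64_t completed;     /* requests the hardware finished */
        uint32_t max_items;     /* ring capacity */
        bool stopped;           /* is the queue currently stopped? */
};

static uint64_t txq_used(const struct txq_state *q)
{
        return q->sent - q->completed;
}

/* High watermark: stop just before the ring is completely full. */
static uint32_t txq_hwat(const struct txq_state *q)
{
        return q->max_items - 1;
}

/* Low watermark: wake again once the ring has drained to half full. */
static uint32_t txq_lwat(const struct txq_state *q)
{
        return q->max_items >> 1;
}

/* Transmit path: account for one queued request, maybe stop the queue. */
static void txq_on_send(struct txq_state *q)
{
        q->sent++;
        if (!q->stopped && txq_used(q) >= txq_hwat(q)) {
                q->stopped = true;
                printf("stop queue at %llu used\n",
                       (unsigned long long)txq_used(q));
        }
}

/* Completion path: account for one finished request, maybe wake the queue. */
static void txq_on_complete(struct txq_state *q)
{
        q->completed++;
        if (q->stopped && txq_used(q) < txq_lwat(q)) {
                q->stopped = false;
                printf("wake queue at %llu used\n",
                       (unsigned long long)txq_used(q));
        }
}

int main(void)
{
        struct txq_state q = { .max_items = 8 };

        for (int i = 0; i < 7; i++)
                txq_on_send(&q);        /* reaches the high watermark */
        for (int i = 0; i < 5; i++)
                txq_on_complete(&q);    /* drains below the low watermark */
        return 0;
}

Separating the high and low watermarks gives the usual hysteresis: the queue is not woken the instant one slot frees up, which avoids bouncing between stopped and running under load.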
/linux/drivers/net/ethernet/intel/fm10k/
  fm10k_main.c: references tx_ring in fm10k_tso(), fm10k_tx_csum(), fm10k_tx_desc_push(), __fm10k_maybe_stop_tx(), fm10k_maybe_stop_tx(), fm10k_tx_map(), fm10k_xmit_frame_ring(), fm10k_check_tx_hang() and fm10k_clean_tx_irq(). On an unsupported tunnel GSO the driver clears NETIF_F_GSO_UDP_TUNNEL from the netdev features and logs an error; checksum failures are counted in tx_stats.csum_err.
/linux/drivers/net/ethernet/intel/i40e/
  i40e_txrx_common.h: references tx_ring in i40e_update_tx_stats() and i40e_arm_wb(). i40e_update_tx_stats() bumps the per-ring byte and packet counters under u64_stats_update_begin()/u64_stats_update_end() and then adds the totals to the queue vector's accumulators.
  i40e_txrx.c: references tx_ring in i40e_fdir(), i40e_program_fdir_filter(), i40e_clean_tx_ring(), i40e_free_tx_resources(), i40e_detect_recover_hung(), i40e_clean_tx_irq(), i40e_setup_tx_descriptors(), i40e_atr(), i40e_tx_prepare_vlan_flags(), i40e_tsyn(), i40e_tx_enable_csum(), i40e_create_tx_ctx(), __i40e_maybe_stop_tx(), i40e_tx_map(), i40e_xmit_frame_ring() and i40e_lan_xmit_frame(). i40e_fdir() claims the descriptor at next_to_use and wraps the index back to zero once it reaches tx_ring->count.
/linux/drivers/net/can/spi/mcp251xfd/
  mcp251xfd-tx.c: references tx_ring in mcp251xfd_get_tx_obj_next(), mcp251xfd_tx_failure_drop(), mcp251xfd_tx_obj_write_sync(), mcp251xfd_tx_busy() and mcp251xfd_start_xmit(). A failed object write rolls the ring head back by one before the error is reported.
  mcp251xfd-tef.c: references tx_ring in mcp251xfd_get_tef_len(), mcp251xfd_tef_obj_read() and mcp251xfd_handle_tefif(). mcp251xfd_get_tef_len() treats a full TEF specially: when mcp251xfd_get_tx_free(tx_ring) is zero the length is clamped to tx_ring->obj_num, and BUILD_BUG_ON() checks keep obj_num, the chip tail and the computed length the same width.
/linux/drivers/net/ethernet/freescale/enetc/
  enetc.c: references tx_ring in enetc_rx_ring_from_xdp_tx_ring(), enetc_unmap_tx_buff(), enetc_free_tx_frame(), enetc_update_tx_ring_tail(), enetc_unwind_tx_frame(), enetc_map_tx_buffs(), enetc_map_tx_tso_hdr(), enetc_map_tx_tso_data(), enetc_tso_complete_csum(), enetc_lso_map_hdr(), enetc_lso_map_data(), enetc_lso_hw_offload(), enetc_map_tx_tso_buffs(), enetc_start_xmit(), enetc_bd_ready_count(), enetc_recycle_xdp_tx_buff(), enetc_clean_tx_ring(), enetc_xdp_map_tx_buff(), enetc_xdp_tx(), enetc_xdp_frame_to_xdp_tx_swbd(), enetc_xdp_xmit(), enetc_clean_rx_ring_xdp(), enetc_alloc_tx_resources(), enetc_assign_tx_resource(), enetc_free_tx_ring(), enetc_setup_txbdr(), enetc_enable_txbdr(), enetc_wait_txbdr(), enetc_reset_tc_mqprio() and enetc_setup_tc_mqprio(). enetc_unmap_tx_buff() chooses between dma_unmap_page() and dma_unmap_single() depending on how the software buffer descriptor was mapped.
/linux/drivers/net/ethernet/intel/igbvf/
  netdev.c: references tx_ring in igbvf_setup_tx_resources(), igbvf_clean_tx_ring(), igbvf_free_tx_resources(), igbvf_clean_tx_irq(), igbvf_intr_msix_tx(), igbvf_configure_msix(), igbvf_configure_tx(), igbvf_watchdog_task(), igbvf_tx_ctxtdesc(), igbvf_tso(), igbvf_tx_csum(), igbvf_tx_map_adv(), igbvf_tx_queue_adv(), igbvf_xmit_frame_ring_adv() and igbvf_xmit_frame(). igbvf_setup_tx_resources() sizes the descriptor area as count * sizeof(union e1000_adv_tx_desc) and aligns it to 4096 bytes before allocating it.
/linux/drivers/net/ethernet/intel/igc/
  igc_xdp.c: references tx_ring in igc_xdp_set_prog(), igc_xdp_enable_pool() and igc_xdp_disable_pool(); enabling an AF_XDP pool disables the ring, sets IGC_RING_FLAG_AF_XDP_ZC and re-enables it.
  igc_dump.c: references tx_ring in igc_rings_dump(), which prints next_to_use, next_to_clean and the buffer_info entry at next_to_clean for every TX queue.
/linux/drivers/net/ethernet/qlogic/qlcnic/
  qlcnic_io.c: references tx_ring in qlcnic_82xx_change_filter(), qlcnic_send_filter(), qlcnic_tx_encap_pkt(), qlcnic_tx_pkt(), qlcnic_xmit_frame(), qlcnic_process_cmd_ring(), qlcnic_poll(), qlcnic_tx_poll(), qlcnic_82xx_napi_add(), qlcnic_82xx_napi_del(), qlcnic_82xx_napi_enable(), qlcnic_82xx_napi_disable(), qlcnic_83xx_msix_sriov_vf_poll(), qlcnic_83xx_poll(), qlcnic_83xx_msix_tx_poll(), qlcnic_83xx_napi_enable(), qlcnic_83xx_napi_disable(), qlcnic_83xx_napi_add() and qlcnic_83xx_napi_del(). qlcnic_82xx_change_filter() writes a MAC filter request directly into the command descriptor at tx_ring->producer and advances the producer with get_next_index().
/linux/drivers/net/ethernet/wangxun/libwx/
  wx_lib.c: references tx_ring in wx_clean_tx_irq(), wx_maybe_stop_tx(), wx_tx_map(), wx_tx_ctxtdesc(), wx_tso(), wx_tx_csum(), wx_xmit_frame_ring(), wx_xmit_frame(), wx_clean_tx_ring(), wx_free_tx_resources() and wx_setup_tx_resources(). wx_clean_tx_irq() bails out early if the carrier is down, then walks tx_buffer_info from next_to_clean with the loop index biased by -tx_ring->count.
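The i -= tx_ring->count visible in the wx_clean_tx_irq() fragment is the negative-bias walk used by several Intel-derived drivers: the running index is kept offset by the ring size so that the wrap test is a cheap zero check instead of a compare against the ring count. The rest of the loop is not visible here, so the following is a hedged user-space illustration of the idiom with invented names (clean_entries(), RING_COUNT), not the driver's cleanup routine.

#include <stdio.h>

#define RING_COUNT 8

/* Clean `todo` entries starting at next_to_clean. The index i is kept
 * biased by -RING_COUNT, so "i just became 0" means "we walked past the
 * last slot and must wrap back to the start of the array". */
static unsigned int clean_entries(int buf[RING_COUNT],
                                  unsigned int next_to_clean,
                                  unsigned int todo)
{
        unsigned int i = next_to_clean;
        int *entry = &buf[i];

        i -= RING_COUNT;                /* bias the index negative */

        while (todo--) {
                printf("clean slot %u (value %d)\n", i + RING_COUNT, *entry);
                *entry = 0;

                entry++;
                i++;
                if (!i) {               /* wrapped past the end */
                        i -= RING_COUNT;
                        entry = buf;
                }
        }

        return i + RING_COUNT;          /* un-bias: new next_to_clean */
}

int main(void)
{
        int buf[RING_COUNT] = { 0, 0, 0, 0, 0, 10, 11, 12 };

        /* Start near the end of the ring so the walk wraps around. */
        unsigned int ntc = clean_entries(buf, 5, 4);

        printf("next_to_clean is now %u\n", ntc);
        return 0;
}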
/linux/drivers/net/ethernet/mscc/
  ocelot_fdma.c: references the FDMA tx_ring in ocelot_fdma_tx_ring_free(), ocelot_fdma_tx_ring_empty(), ocelot_fdma_tx_cleanup(), ocelot_fdma_send_skb() and ocelot_fdma_free_tx_ring(). ocelot_fdma_tx_ring_free() derives the number of free slots from next_to_use and next_to_clean, handling the wrapped and non-wrapped cases separately.
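ocelot_fdma_tx_ring_free() computes free space from a head (next_to_use) and tail (next_to_clean) pair while always keeping one slot in reserve, which is what lets head == tail mean "empty" unambiguously. A small sketch of that computation, assuming a fixed TX_RING_SIZE constant in place of whatever ring-size macro the driver actually uses:

#include <assert.h>

#define TX_RING_SIZE 16         /* stand-in for the driver's ring size */

/* Free TX slots for a ring addressed by head (next_to_use) and tail
 * (next_to_clean) indices. One slot is always held back so that a full
 * ring never looks identical to an empty one. */
static unsigned int tx_ring_free(unsigned int next_to_use,
                                 unsigned int next_to_clean)
{
        if (next_to_use >= next_to_clean)
                return TX_RING_SIZE - (next_to_use - next_to_clean) - 1;

        return next_to_clean - next_to_use - 1;
}

int main(void)
{
        assert(tx_ring_free(0, 0) == TX_RING_SIZE - 1);   /* empty ring */
        assert(tx_ring_free(15, 0) == 0);                 /* full: one slot reserved */
        assert(tx_ring_free(3, 10) == 6);                 /* producer has wrapped */
        return 0;
}

The reserved slot is the usual way to distinguish full from empty without extra counters: the producer stops while one slot is still free, so head can never catch up to tail from behind.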
/linux/drivers/net/ethernet/broadcom/
  bcm4908_enet.c: tx_ring is a struct bcm4908_enet_dma_ring member of the device's private data and is referenced in bcm4908_enet_dma_free(), bcm4908_enet_dma_alloc(), bcm4908_enet_open(), bcm4908_enet_stop() and bcm4908_enet_poll_tx(). The descriptor area of length * sizeof(struct bcm4908_enet_dma_ring_bd) bytes is released with dma_free_coherent() and the per-slot bookkeeping with kfree().
/linux/drivers/net/ethernet/intel/ixgbevf/
  ixgbevf_main.c: references tx_ring in ixgbevf_check_tx_hang(), ixgbevf_clean_tx_irq(), ixgbevf_clean_tx_ring(), ixgbevf_free_tx_resources(), ixgbevf_setup_tx_resources(), ixgbevf_tx_ctxtdesc(), ixgbevf_tso(), ixgbevf_tx_csum(), ixgbevf_tx_map(), __ixgbevf_maybe_stop_tx(), ixgbevf_maybe_stop_tx(), ixgbevf_xmit_frame_ring() and ixgbevf_xmit_frame(). ixgbevf_check_tx_hang() compares the completed count against tx_stats.tx_done_old and only arms the hang check (__IXGBEVF_HANG_CHECK_ARMED) when completions have stalled while descriptors are still pending.