Lines matching references to tx_ring
77 * @tx_ring: ring to be cleaned
79 static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
85 if (!tx_ring->tx_bi)
89 for (i = 0; i < tx_ring->count; i++)
90 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
92 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
93 memset(tx_ring->tx_bi, 0, bi_size);
96 memset(tx_ring->desc, 0, tx_ring->size);
98 tx_ring->next_to_use = 0;
99 tx_ring->next_to_clean = 0;
101 if (!tx_ring->netdev)
105 netdev_tx_reset_queue(txring_txq(tx_ring));
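The iavf_clean_tx_ring() fragment above (lines 77-105) walks every tx_bi entry, releases it, zeroes both the buffer-info array and the descriptor area, and resets next_to_use/next_to_clean. Below is a minimal user-space sketch of that reset pattern; the demo_* names are hypothetical and not part of the driver, and free() merely stands in for the unmap/free helper.

#include <stdlib.h>
#include <string.h>

struct demo_buf  { void *skb; };                 /* stands in for iavf_tx_buffer */
struct demo_desc { unsigned long long qw[2]; };  /* stands in for iavf_tx_desc   */

struct demo_ring {
        struct demo_buf  *bufs;     /* ~ tx_ring->tx_bi */
        struct demo_desc *descs;    /* ~ tx_ring->desc  */
        unsigned int count;         /* ~ tx_ring->count */
        unsigned int next_to_use;
        unsigned int next_to_clean;
};

static void demo_clean_ring(struct demo_ring *r)
{
        unsigned int i;

        if (!r->bufs)
                return;

        /* release every outstanding buffer, then wipe the bookkeeping */
        for (i = 0; i < r->count; i++) {
                free(r->bufs[i].skb);            /* ~ iavf_unmap_and_free_tx_resource() */
                r->bufs[i].skb = NULL;
        }
        memset(r->bufs, 0, sizeof(*r->bufs) * r->count);
        memset(r->descs, 0, sizeof(*r->descs) * r->count);

        /* the ring is empty again: producer and consumer restart at slot 0 */
        r->next_to_use = 0;
        r->next_to_clean = 0;
}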
110 * @tx_ring: Tx descriptor ring for a specific queue
114 void iavf_free_tx_resources(struct iavf_ring *tx_ring)
116 iavf_clean_tx_ring(tx_ring);
117 kfree(tx_ring->tx_bi);
118 tx_ring->tx_bi = NULL;
120 if (tx_ring->desc) {
121 dma_free_coherent(tx_ring->dev, tx_ring->size,
122 tx_ring->desc, tx_ring->dma);
123 tx_ring->desc = NULL;
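iavf_free_tx_resources() (lines 114-123) shows the teardown order: clean the ring first, then free the buffer-info array, and only then release the descriptor memory. The same ordering on the hypothetical demo_ring model from the previous sketch, with free() standing in for kfree()/dma_free_coherent():

static void demo_free_ring(struct demo_ring *r)
{
        demo_clean_ring(r);          /* drop outstanding buffers first      */

        free(r->bufs);               /* ~ kfree(tx_ring->tx_bi)             */
        r->bufs = NULL;

        if (r->descs) {              /* ~ dma_free_coherent(dev, size, ...) */
                free(r->descs);
                r->descs = NULL;
        }
}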
179 struct iavf_ring *tx_ring = NULL;
198 tx_ring = &vsi->back->tx_rings[i];
199 if (tx_ring && tx_ring->desc) {
207 packets = tx_ring->stats.packets & INT_MAX;
208 if (tx_ring->prev_pkt_ctr == packets) {
209 iavf_force_wb(vsi, tx_ring->q_vector);
217 tx_ring->prev_pkt_ctr =
218 iavf_get_tx_pending(tx_ring, true) ? packets : -1;
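The hung-queue check (lines 179-218) compares the ring's completed-packet counter against the value remembered on the previous pass; if it has not moved while descriptors were still pending, the queue is treated as stalled and a write-back is forced via iavf_force_wb(). A hedged sketch of just that comparison, with illustrative names that are not driver API:

#include <limits.h>
#include <stdbool.h>

struct demo_hang_state {
        int prev_pkt_ctr;    /* -1 means "no work was pending last time" */
};

static bool demo_queue_looks_stalled(struct demo_hang_state *st,
                                     unsigned long long completed_packets,
                                     unsigned int pending_descs)
{
        int packets = (int)(completed_packets & INT_MAX);

        /* counter unchanged since the last pass while work was pending:
         * report a probable stall so the caller can kick the queue
         * (~ iavf_force_wb() in the driver) */
        if (st->prev_pkt_ctr == packets)
                return true;

        /* otherwise remember the counter only while work is still pending,
         * mirroring "iavf_get_tx_pending(tx_ring, true) ? packets : -1" */
        st->prev_pkt_ctr = pending_descs ? packets : -1;
        return false;
}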
228 * @tx_ring: Tx ring to clean
234 struct iavf_ring *tx_ring, int napi_budget)
236 int i = tx_ring->next_to_clean;
242 tx_buf = &tx_ring->tx_bi[i];
243 tx_desc = IAVF_TX_DESC(tx_ring, i);
244 i -= tx_ring->count;
256 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
273 dma_unmap_single(tx_ring->dev,
285 tx_ring, tx_desc, tx_buf);
291 i -= tx_ring->count;
292 tx_buf = tx_ring->tx_bi;
293 tx_desc = IAVF_TX_DESC(tx_ring, 0);
298 dma_unmap_page(tx_ring->dev,
311 i -= tx_ring->count;
312 tx_buf = tx_ring->tx_bi;
313 tx_desc = IAVF_TX_DESC(tx_ring, 0);
322 i += tx_ring->count;
323 tx_ring->next_to_clean = i;
324 u64_stats_update_begin(&tx_ring->syncp);
325 tx_ring->stats.bytes += total_bytes;
326 tx_ring->stats.packets += total_packets;
327 u64_stats_update_end(&tx_ring->syncp);
328 tx_ring->q_vector->tx.total_bytes += total_bytes;
329 tx_ring->q_vector->tx.total_packets += total_packets;
331 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
337 unsigned int j = iavf_get_tx_pending(tx_ring, false);
342 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
343 tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB;
347 netdev_tx_completed_queue(txring_txq(tx_ring),
351 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
352 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
357 if (__netif_subqueue_stopped(tx_ring->netdev,
358 tx_ring->queue_index) &&
360 netif_wake_subqueue(tx_ring->netdev,
361 tx_ring->queue_index);
362 ++tx_ring->tx_stats.restart_queue;
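iavf_clean_tx_irq() (lines 228-362) uses a small indexing trick: the running index is biased by -tx_ring->count up front so the in-loop wrap test is simply "did i reach zero?", and the bias is removed once at the end before next_to_clean and the byte/packet totals are published. A reduced sketch of that biased-index walk, reusing struct demo_ring from the first sketch (the descriptor-done check and the stats/queue-wake bookkeeping of the real path are omitted):

static unsigned int demo_clean_irq(struct demo_ring *r, unsigned int budget)
{
        struct demo_buf *buf = &r->bufs[r->next_to_clean];
        int i = (int)r->next_to_clean;
        unsigned int cleaned = 0;

        i -= (int)r->count;          /* bias: "i == 0" now means "wrapped" */

        while (budget--) {
                /* the real loop first checks that hardware marked the
                 * descriptor as done; that check is left out here */
                free(buf->skb);              /* ~ napi_consume_skb()       */
                buf->skb = NULL;
                cleaned++;

                buf++;
                i++;
                if (!i) {                    /* end of the ring: wrap both */
                        i -= (int)r->count;
                        buf = r->bufs;
                }
        }

        i += (int)r->count;          /* drop the bias before storing       */
        r->next_to_clean = (unsigned int)i;
        return cleaned;
}

The bias turns the per-iteration wrap check into a compare against zero; the real code undoes it with the single "i += tx_ring->count" at line 322 before updating next_to_clean, the u64 stats, and, when enough descriptors are free again, waking the subqueue.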
666 * @tx_ring: the tx ring to set up
670 int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
672 struct device *dev = tx_ring->dev;
679 WARN_ON(tx_ring->tx_bi);
680 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
681 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
682 if (!tx_ring->tx_bi)
686 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
687 tx_ring->size = ALIGN(tx_ring->size, 4096);
688 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
689 &tx_ring->dma, GFP_KERNEL);
690 if (!tx_ring->desc) {
692 tx_ring->size);
696 tx_ring->next_to_use = 0;
697 tx_ring->next_to_clean = 0;
698 tx_ring->prev_pkt_ctr = -1;
702 kfree(tx_ring->tx_bi);
703 tx_ring->tx_bi = NULL;
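iavf_setup_tx_descriptors() (lines 666-703) allocates the per-buffer bookkeeping with kzalloc(), rounds the descriptor area up to a whole 4096-byte multiple before dma_alloc_coherent(), and rolls the buffer array back if the descriptor allocation fails. A user-space sketch of that sizing and rollback on the hypothetical demo_ring model, with calloc() standing in for the kernel allocators:

#include <stdlib.h>

/* round x up to a power-of-two multiple a, mirroring ALIGN(size, 4096) */
#define DEMO_ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static int demo_setup_ring(struct demo_ring *r, unsigned int count)
{
        unsigned long desc_bytes;

        r->count = count;
        r->bufs = calloc(count, sizeof(*r->bufs));   /* ~ kzalloc(bi_size, GFP_KERNEL) */
        if (!r->bufs)
                return -1;

        desc_bytes = DEMO_ALIGN_UP(count * sizeof(*r->descs), 4096UL);
        r->descs = calloc(1, desc_bytes);            /* ~ dma_alloc_coherent()         */
        if (!r->descs) {
                free(r->bufs);                       /* roll back on failure           */
                r->bufs = NULL;
                return -1;
        }

        r->next_to_use = 0;
        r->next_to_clean = 0;
        return 0;
}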
1687 * @tx_ring: ring to send buffer on
1697 struct iavf_ring *tx_ring, u32 *flags)
1709 if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
1711 } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
1714 dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
1842 * @tx_ring: Tx descriptor ring
1847 struct iavf_ring *tx_ring,
2004 * @tx_ring: ring to create the descriptor on
2009 static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2014 int i = tx_ring->next_to_use;
2021 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2024 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2119 * @tx_ring: the ring to be checked
2124 int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2126 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2131 if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2135 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2136 ++tx_ring->tx_stats.restart_queue;
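__iavf_maybe_stop_tx() (lines 2119-2136) stops the subqueue first, then re-checks the free-descriptor count after a memory barrier and restarts the queue if the completion path freed space in the meantime; stopping before the second check is what closes the race against a concurrent clean. A hedged sketch of that stop/re-check/restart shape using C11 atomics (demo_* names are illustrative, not driver API):

#include <stdatomic.h>
#include <stdbool.h>

struct demo_txq {
        atomic_uint unused_descs;   /* ~ IAVF_DESC_UNUSED(tx_ring)     */
        atomic_bool stopped;
        unsigned int restarts;      /* ~ tx_stats.restart_queue        */
};

static int demo_maybe_stop(struct demo_txq *q, unsigned int needed)
{
        if (atomic_load(&q->unused_descs) >= needed)
                return 0;                            /* fast path: enough room   */

        atomic_store(&q->stopped, true);             /* ~ netif_stop_subqueue()  */

        /* full fence, then look again: another CPU may just have completed
         * descriptors (~ smp_mb() in the driver) */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&q->unused_descs) < needed)
                return -1;                           /* ~ -EBUSY: stay stopped   */

        atomic_store(&q->stopped, false);            /* ~ netif_start_subqueue() */
        q->restarts++;
        return 0;
}

The cheap fast-path check at the top corresponds to what the iavf_maybe_stop_tx() callers at lines 2240 and 2325 rely on; only the slow path pays for the barrier and the second look.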
2142 * @tx_ring: ring to send buffer on
2150 static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2159 u16 i = tx_ring->next_to_use;
2170 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2172 tx_desc = IAVF_TX_DESC(tx_ring, i);
2178 if (dma_mapping_error(tx_ring->dev, dma))
2197 if (i == tx_ring->count) {
2198 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2218 if (i == tx_ring->count) {
2219 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2226 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2229 tx_bi = &tx_ring->tx_bi[i];
2232 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2235 if (i == tx_ring->count)
2238 tx_ring->next_to_use = i;
2240 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2261 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
2262 writel(i, tx_ring->tail);
2268 dev_info(tx_ring->dev, "TX DMA map failed\n");
2272 tx_bi = &tx_ring->tx_bi[i];
2273 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2277 i = tx_ring->count;
2281 tx_ring->next_to_use = i;
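iavf_tx_map() (lines 2142-2281) maps the skb head and each fragment, fills one data descriptor per mapped chunk, wraps the index back to 0 whenever it reaches tx_ring->count, records the frame with netdev_tx_sent_queue(), and writes the tail register only when the stack is not about to hand over more frames; on a mapping error it unwinds the buffers already mapped. Below is a heavily reduced sketch of the fill loop and the conditional doorbell, reusing demo_ring from the first sketch (no DMA, no error unwind; demo_segment is illustrative only):

#include <stdint.h>
#include <stdbool.h>

struct demo_segment { void *data; unsigned int len; };

static void demo_tx_map(struct demo_ring *r,
                        const struct demo_segment *segs, unsigned int nsegs,
                        volatile unsigned int *tail_reg, bool more_coming)
{
        unsigned int i = r->next_to_use;
        unsigned int s;

        for (s = 0; s < nsegs; s++) {
                /* ~ dma_map_single()/skb_frag_dma_map() plus the descriptor
                 * write; here the "descriptor" just records address and length */
                r->descs[i].qw[0] = (unsigned long long)(uintptr_t)segs[s].data;
                r->descs[i].qw[1] = segs[s].len;

                if (++i == r->count)   /* same wrap as "if (i == tx_ring->count)" */
                        i = 0;
        }

        r->next_to_use = i;

        /* bump the hardware tail only when nothing more is queued behind us,
         * mirroring "netif_xmit_stopped(...) || !netdev_xmit_more()" */
        if (!more_coming)
                *tail_reg = i;         /* ~ writel(i, tx_ring->tail) */
}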
2287 * @tx_ring: ring to send buffer on
2292 struct iavf_ring *tx_ring)
2307 iavf_trace(xmit_frame_ring, skb, tx_ring);
2316 tx_ring->tx_stats.tx_linearize++;
2325 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2326 tx_ring->tx_stats.tx_busy++;
2331 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2337 iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
2362 tx_ring, &cd_tunneling);
2369 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2372 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2378 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
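iavf_xmit_frame_ring() (lines 2287-2378) first estimates how many data descriptors the frame needs (linearizing the skb and counting tx_linearize if there are too many fragments), then reserves that count plus the "+ 4 + 1" extra entries seen at line 2325 before touching the ring; if iavf_maybe_stop_tx() reports no room, the frame is returned as busy and tx_busy is incremented. A small sketch of that admission check, reusing demo_maybe_stop() and struct demo_txq from the earlier sketch (the split of the extra 4 + 1 descriptors is not spelled out in this excerpt, so it is kept as an opaque reserve here):

#define DEMO_TXD_EXTRA  (4 + 1)   /* the "+ 4 + 1" reserve from line 2325 */

static int demo_xmit_admit(struct demo_txq *q, unsigned int data_descs)
{
        unsigned int needed = data_descs + DEMO_TXD_EXTRA;

        if (demo_maybe_stop(q, needed)) {
                /* ~ tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY */
                return -1;
        }

        /* enough room: the real path goes on to VLAN flags, TSO/checksum
         * offload setup, the context descriptor, and iavf_tx_map() */
        return 0;
}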
2394 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2406 return iavf_xmit_frame_ring(skb, tx_ring);